# Scrape Dangdang book search results for the keyword "python" (pages 1-5; see range(1, 6) below)

import requests
from pyquery import PyQuery as pq

import time

import re
from xlsxwriter import Workbook
from xlsxwriter.worksheet import Worksheet
import os

# Search-result URL template; {} is the 1-based page index.
_SEARCH_URL = 'http://search.dangdang.com/?key=python&SearchFromTop=1&catalog=&page_index={}'

# Pre-build the URLs for result pages 1 through 5.
urls = [_SEARCH_URL.format(page) for page in range(1, 6)]

# One shared session so TCP connections are reused across page fetches.
session = requests.session()

# Header fields common to both the search-page and image requests.
_base_headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
}

# Headers for the HTML search pages (pinned to the search host).
header = {**_base_headers, "Host": "search.dangdang.com"}

# Headers for cover-image downloads (no Host pin; ask for fresh content).
img_header = {**_base_headers, "Cache-Control": "max-age=0", "Pragma": "no-cache"}


def getHtml(url):
    """Fetch one search-result page and return its decoded HTML.

    Returns '' on any non-200 response. The decoded page is also dumped to
    book.html (overwritten each call) as a debugging aid.
    """
    # timeout added: without it a stalled server hangs the crawl forever.
    response = session.get(url, headers=header, timeout=10)
    if response.status_code != requests.codes.ok:
        return ''
    # Dangdang serves GBK-encoded pages; gb18030 is a strict superset of GBK,
    # so valid pages decode identically while stray bytes no longer raise
    # UnicodeDecodeError (replaced instead of crashing the whole page).
    html = response.content.decode("gb18030", errors="replace")
    with open("book.html", "w", encoding="utf-8") as fp:
        fp.write(html)
    return html


def parseHtml(html):
    """Parse one search-result page and yield one dict of book fields per item.

    Side effect: downloads each cover image into the local image/ directory
    (created on demand) unless the file already exists.
    """
    doc = pq(html)
    li = doc(".shoplist ul>li")
    for item in li.items():
        img = ''
        img_title = ''
        cover = item("a>img")
        if len(cover) > 0:
            # Lazy-loaded covers keep the real URL in data-original;
            # eagerly-loaded ones use src. `or ''` guards against a missing
            # attribute returning None (the original crashed on .split then).
            img = cover.attr("data-original") or cover.attr("src") or ''
            img_title = img.split("/")[-1] if img else ''
        if img != '' and img_title != '':
            # Bug fix: the image/ directory was never created, so the open()
            # below raised FileNotFoundError on a fresh checkout.
            os.makedirs("image", exist_ok=True)
            if not os.path.exists("image/" + img_title):
                # Cover URLs appear protocol-relative (//img...) — TODO confirm.
                ims_response = requests.get("http:" + img, headers=img_header)
                if ims_response.status_code == requests.codes.ok:
                    with open("image/" + img_title, "wb") as img_fp:
                        img_fp.write(ims_response.content)
        discount = ''
        if len(item(".search_discount")) > 0:
            # Discount text looks like "(x.x折)"; capture the part in parens.
            g = re.search(r'\((.*?)\)', item(".search_discount").text().strip(), re.S)
            if g is not None:
                discount = g.group(1)
        book_info = item("p.search_book_author")
        author = book_info("a:first-child").attr("title")
        date = book_info("span:nth-child(2)").text()[1:]  # drop leading "/"
        pub_house = book_info("[name='P_cbs']").attr("title")
        comment_number = item('.search_comment_num').text()[:-3]  # drop "条评论"

        yield {
            # `or ''` guards: attr() returns None when the attribute is
            # absent, and None.strip() crashed the original generator.
            "href": (item("p.name a").attr("href") or '').strip(),
            "title": (item("p.name a").attr("title") or '').strip(),
            "price": item(".search_now_price").text()[1:],
            "pre_price": item(".search_pre_price").text()[1:],
            "discount": discount,
            "img": img,
            "img_title": img_title,
            "author": author,
            'pub_date': date,
            "pub_house": pub_house,
            "detail": item(".detail").text(),
            "commen_num": comment_number
        }


def exec():
    """Crawl every search page and write all books into an Excel workbook.

    NOTE(review): the name shadows the builtin ``exec``; kept unchanged
    because the module-level call site depends on it.
    """
    # Local name `columns` avoids shadowing the module-level request `header`.
    columns = [
        {"key": "img", "val": "封面图", "type": "image"},
        {"key": "title", "val": "书名", "type": "text"},
        {"key": "author", "val": "作者", "type": "text"},
        {"key": "pub_house", "val": "出版社", "type": "text"},
        {"key": "pub_date", "val": "出版时间", "type": "text"},
        {"key": "price", "val": "售价", "type": "text"},
        {"key": "pre_price", "val": "原价", "type": "text"},
        {"key": "discount", "val": "折扣", "type": "text"},
        {"key": "commen_num", "val": "评论数", "type": "text"},
        {"key": "detail", "val": "详情", "type": "text"},
        {"key": "href", "val": "详细链接", "type": "text"}
    ]
    row_index = 0
    # Bug fix: xlsxwriter can only produce XLSX-format files; naming the file
    # .xls made Excel warn about/reject the mismatched format.
    excel = Workbook("当当图书.xlsx")
    sheet: Worksheet = excel.add_worksheet("当当图书搜索榜")
    # Header row.
    for col_index, col in enumerate(columns):
        sheet.write(row_index, col_index, col['val'])
    row_index = row_index + 1
    for url in urls:
        html = getHtml(url)
        if html != '':
            for row in parseHtml(html):
                for col_index, col in enumerate(columns):
                    row_val = row.get(col['key'], '')
                    if col['type'] == 'image' and row_val != '':
                        # Shrink the cover so it roughly fits the 50-pt row.
                        sheet.insert_image(row_index, col_index, "image/" + row['img_title'],
                                           {'x_scale': 0.3, 'y_scale': 0.3})
                    else:
                        sheet.write(row_index, col_index, row_val)
                sheet.set_row(row_index, 50)
                row_index = row_index + 1
        else:
            # Bug fix: the original wrote `html` here, which is always '' in
            # this branch — record the failing URL instead so it can be retried.
            with open("error.html", "a+", encoding="utf-8") as error_fp:
                error_fp.write(url + "\n")
        time.sleep(2)  # be polite to the server between page fetches
    excel.close()


# Run the crawl only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    exec()