import requests
import re, json
from lxml import etree
import pandas as pd
import time
import numpy as np
# NOTE: comments on the detail page are loaded dynamically (评论动态加载)


def entry(url):
    """Scrape one page of JD ad-search results for books and their detail pages.

    Fetches the JSONP search endpoint at *url*, then visits every book's
    detail page to collect publisher, publish date, author and comment text.

    Args:
        url: JSONP search API URL; the response body looks like
            ``callbackName({...})``.

    Returns:
        A list of dicts, one per book, with Chinese-keyed fields
        (书名, 图书链接, 出版社, 价格, 评论数, 出版时间, 作者, 评论内容).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
        'referer': 'https://search.jd.com/Search?keyword=python',
        'cookie': '加上'  # placeholder — fill in a real cookie before running
    }
    # timeout: a stalled server must not hang the crawler forever
    res = requests.get(url, headers=headers, timeout=10)
    res.encoding = 'utf-8'
    html = res.text
    # Strip the JSONP wrapper `callbackName({...})` by locating the outer
    # parentheses instead of the original hard-coded 14-char slice, which
    # breaks as soon as the callback name changes length. Fall back to the
    # old slice if the pattern ever fails to match.
    wrapper = re.search(r'\(\s*(\{.*\})\s*\)\s*;?\s*$', html, re.S)
    payload = wrapper.group(1) if wrapper else html[14:-1]
    # '291' is the ad-slot id requested via ad_ids=291%3A34 in the URL
    book_ls = json.loads(payload)['291']
    data_ls = []
    for book in book_ls:
        dic = dict()
        try:
            # ad_title wraps the search keyword in highlight tags; drop them
            dic['书名'] = re.sub('<font class="skcolor_ljg">|</font>', '', book['ad_title'])
            dic['图书链接'] = book['link_url']
            dic['出版社'] = book['shop_link']['shop_name']
            dic['价格'] = book['sku_price']
            dic['评论数'] = book['comment_num']
        except Exception as e:
            # best-effort: skip ads missing any required field
            print(e)
            continue

        # Detail page: send the same headers and a timeout (the original sent
        # neither, so this request could stall or be served a bot-block page).
        response = requests.get(dic['图书链接'], headers=headers, timeout=10)

        dic['出版时间'] = ''.join(re.findall('出版时间：(.*?)</li>', response.text, re.S))
        detail = etree.HTML(response.text)
        try:
            # prefer the publisher listed on the detail page over the shop name
            dic['出版社'] = detail.xpath('//ul[@id="parameter2"]/li[1]/a/text()')[0]
        except Exception as e:
            # xpath miss ([0] on empty list) means a non-standard page; skip it
            print(e)
            continue
        # raw string for the regex — bare '\s' is a SyntaxWarning on 3.12+
        dic['作者'] = re.sub(r'\s', '', ''.join(detail.xpath('//div[@id="p-author"]//text()')))
        dic['评论内容'] = re.sub('使用心得：', '', ' '.join(detail.xpath('//div[@class="comment-content"]/text()')))

        data_ls.append(dic)
        print(dic)  # progress log: inspect each record as the crawler runs
        time.sleep(2)  # be polite: throttle requests to the detail pages
    return data_ls


if __name__ == '__main__':
    # Crawl pages 1 and 2 of the ad-search API, then dump everything to CSV.
    base_url = 'https://search-x.jd.com/Search?callback=jQuery1718204&area=15&enc=utf-8&keyword=python&adType=7&page={}&ad_ids=291%3A34&xtest=new_search&_=1577966921547'
    data_ls = []
    for page in range(1, 3):
        data_ls.extend(entry(base_url.format(page)))
    pd.DataFrame(data_ls).to_csv('Python图书信息统计表.csv', index=False)
    print('保存成功')
