import requests, re, json
from lxml import etree
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
import numpy as np


def get_basicinfo(url):
    """Scrape one Dangdang search-result page and return basic book info.

    Parameters
    ----------
    url : str
        A dangdang.com search-result URL (``//ul[@class="bigimg"]/li`` layout).

    Returns
    -------
    list[dict]
        One dict per book with keys 书名, 链接, 作者, 出版社, 出版时间,
        价格 (float) and 评论数量 (int). Malformed listings are skipped.
    """
    # BUG FIX: the key was 'UserAgent' — the real HTTP header is 'User-Agent',
    # so the site previously saw requests' default UA instead of this one.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.16 Safari/537.36'}
    response = requests.get(url, headers=headers, timeout=10)
    # Dangdang serves GB-encoded pages; trust the detected encoding so that
    # Chinese titles/authors are not mojibake'd before lxml parses them.
    response.encoding = response.apparent_encoding
    xml = etree.HTML(response.text)
    books = xml.xpath('//ul[@class="bigimg"]/li')
    ls = []
    for book in books:
        # A single listing missing any field used to raise IndexError and
        # abort the whole scrape; now such entries are simply skipped.
        try:
            data = {}
            data['书名'] = book.xpath('./a/@title')[0]
            data['链接'] = book.xpath('./a/@href')[0]
            data['作者'] = ''.join(book.xpath('./p[@class="search_book_author"]/span[1]//text()'))
            data['出版社'] = book.xpath('./p[@class="search_book_author"]/span[3]/a/text()')[0]
            # The date span looks like "/2020-01-01"; keep the last 10 chars.
            data['出版时间'] = book.xpath('./p[@class="search_book_author"]/span[2]/text()')[0][-10:]
            data['价格'] = float(re.sub(r'¥', '', book.xpath('./p[@class="price"]/span[@class="search_now_price"]/text()')[0]))
            data['评论数量'] = int(re.sub(r'条评论', '', book.xpath('./p[@class="search_star_line"]/a/text()')[0]))
        except (IndexError, ValueError):
            continue
        ls.append(data)
    return ls

def get_comment(book_id, data):
    """Crawl up to 10 pages of reader comments for one book.

    Parameters
    ----------
    book_id : str
        Numeric product id extracted from the book's detail-page URL.
    data : dict
        The book's info dict from ``get_basicinfo``; mutated in place.

    Returns
    -------
    dict
        *data* with an added 评论 key: all comment text, whitespace stripped.
    """
    # BUG FIX: the original backslash line-continuation embedded a long run
    # of spaces inside the User-Agent value; build it as one clean string.
    headers = {
        'referer': 'http://product.dangdang.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.22 Safari/537.36 SE 2.X MetaSr 1.0',
    }
    comments = []
    for page in range(1, 11):
        print(f"在爬取{book_id}的第{page}页评论", end='')
        try:
            comment_url = f'http://product.dangdang.com/index.php?r=comment%2Flist&productId={book_id}&categoryPath=01.54.06.19.00.00&mainProductId={book_id}&pageIndex={page}'
            # timeout prevents one dead request from hanging a worker thread.
            res = requests.get(comment_url, headers=headers, timeout=10)
            # res.json() replaces json.loads(res.text[:]) — the [:] slice was
            # a pointless full copy of the string.
            dic = res.json()
            comment_text = dic['data']['list']['html']
            xml_c = etree.HTML(comment_text)
            comments += xml_c.xpath('//div[@class="describe_detail"]/span//text()')
            print('✔')
        except Exception as e:
            # Best-effort: log the failed page and keep crawling the rest.
            print('✘', e)
    data['评论'] = re.sub(r'\s', '', ''.join(comments))
    return data

if __name__ == '__main__':
    # Scrape the first page of Dangdang search results for "python" books,
    # fetch comments for each in parallel, and dump everything to CSV.
    url = 'http://search.dangdang.com/?key=python&act=input'
    ls = get_basicinfo(url)
    # Product id is the first digit-run in the detail-page URL.
    id_ls = [re.findall(r'\d+', dic['链接'])[0] for dic in ls]
    # BUG FIX: the executor was never shut down; the with-block guarantees
    # worker threads are joined even if a crawl raises.
    with ThreadPoolExecutor(max_workers=5) as executor:
        info = list(executor.map(get_comment, id_ls, ls))
    df = pd.DataFrame(info)
    df.to_csv('Python图书信息统计表.csv', index=False)
    print('保存成功')
