#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# author: maxiao
# file_name: dangdang.py
# date: 2022/11/3 21:12
# description: 爬取当当网数据

from bs4 import BeautifulSoup
from faker import Faker
import logging
import requests
import pandas as pd

# Configure the root logger once at import time: timestamped INFO-level
# messages used for scraping-progress reports below.
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s-->%(levelname)s-->%(message)s")


class DangDang:
    """Scrape the DangDang books bestseller ranking and persist it to CSV.

    Usage: call ``get_information(page)`` for each page to accumulate rows,
    then ``save_data(name)`` to write everything collected so far.
    """

    def __init__(self):
        # Accumulators are per-instance. (They were previously mutable
        # CLASS attributes, which would be shared by every instance of
        # DangDang — a classic shared-mutable-state bug.)
        self.num_list = []              # book rank
        self.name_list = []             # book title
        self.star_list = []             # review count
        self.publisher_list = []        # publisher name
        self.date_list = []             # publication date
        self.price_n_list = []          # discounted price
        self.price_r_list = []          # original price
        self.publisher_info_list = []   # author / translator text

    def get_information(self, page):
        """Fetch one bestseller page and append each book's fields to the lists.

        Args:
            page: 1-based page number of the ranking list.

        Raises:
            requests.HTTPError: if the server answers with an error status.
        """
        url = f'http://bang.dangdang.com/books/bestsellers/01.00.00.00.00.00-recent30-0-0-1-{page}'
        # BUG FIX: Faker().user_agent is a method — it must be CALLED to get
        # a string, otherwise the header value is a bound-method object.
        header = {'User-Agent': Faker().user_agent()}
        # BUG FIX: requests.get's second positional argument is `params`,
        # not headers; the original call never sent the User-Agent at all.
        # A timeout prevents the scraper from hanging forever.
        response = requests.get(url, headers=header, timeout=30)
        response.raise_for_status()
        # Parse the page into an HTML tree and walk the ranking list items.
        bs = BeautifulSoup(response.text, 'html.parser')
        ul = bs.find('ul', 'bang_list')
        for li in ul.find_all('li'):
            # Book rank — strip the trailing '.' from e.g. "1."
            self.num_list.append(li.find('div', 'list_num').text[:-1])
            # Book title
            self.name_list.append(li.find('div', 'name').find('a').text)
            # Review count — strip the trailing "条评论" suffix (3 chars)
            self.star_list.append(li.find('div', 'star').find('a').text[:-3])
            # The SECOND publisher_info div holds date + publisher;
            # the first holds author/translator (used further below).
            pub_info = li.find_all('div', 'publisher_info')[1]
            # Publication date
            self.date_list.append(pub_info.find('span').text)
            # Publisher name
            self.publisher_list.append(pub_info.find('a').text)
            # Prices — drop the leading '¥' currency symbol
            price_div = li.find('div', 'price')
            self.price_n_list.append(price_div.find('span', 'price_n').text[1:])
            self.price_r_list.append(price_div.find('span', 'price_r').text[1:])
            # Author / translator block (first publisher_info div)
            self.publisher_info_list.append(li.find('div', 'publisher_info').text)

    def save_data(self, name):
        """Write all accumulated rows to a UTF-8 CSV file at path *name*."""
        df = pd.DataFrame({
            '图书排名': self.num_list,
            '图书名称': self.name_list,
            '图书评论数': self.star_list,
            '图书折扣价': self.price_n_list,
            '图书原价': self.price_r_list,
            '图书出版日期': self.date_list,
            '图书出版社': self.publisher_list,
            '图书作者及译者': self.publisher_info_list
        })
        df.to_csv(name, sep=',', index=False, encoding='utf-8')


# Script entry point: guard so importing this module does not trigger
# a full 25-page scrape as a side effect.
if __name__ == '__main__':
    dd = DangDang()
    # Extract all data from pages 1 through 25 (20 books per page → top 500).
    for num in range(1, 26):
        rate = num * 100 / 25  # progress as a percentage of 25 pages
        logging.info(f"开始从当当图书网提取第{num}页的数据，进度{rate}%")
        dd.get_information(num)
    logging.info("数据提取完毕！！！")
    dd.save_data('top500.csv')