import os
import time
from datetime import datetime

import pandas as pd
import requests
from lxml import etree

# //div[@id="container"]  //表示在html全局下，寻找id为container的div标签，因为id是全局唯一的
# 中图网书籍信息：
#   相对路径： //div[@id="container"]/div/div[1]/div[1]/ul/li ----- 1 of 52
#            //*[@id="container"]/div/div[1]/div[1]/ul/li
#   完整路径（绝对路径）：/html/body/form/div[8]/div/div[5]/div/div[1]/div[1]/ul/li   ----- 1 of 52
#                     /html/body/form/div[8]/div/div[5]/div/div[1]/div[1]/ul/li

# html2 = """
# <!DOCTYPE html>
# <html>
#  <head lang='en'>
#     <meta charest='utf-8'>
#     <title></title>
#  </head>
#  <body>
#     <div id="test3">
#     我左青龙,
#         <span id='tiger'>
#             右白虎
#             <ul>上朱雀,
#                 <li>下玄武,</li>
#             </ul>
#         </span>
#         龙头在胸口
#     </div>
#  </body>
# </html>
# """
# # 如果我们想爬取的内容是html文档中的所有文本的话，需要使用string方法进行提取
# selector2 = etree.HTML(html2)
# content2 = selector2.xpath('//div[@id="test3"]')[0]  # 列表，只有一个元素
# print(content2)
# print('----------------')
# info = content2.xpath('string(.)')
# print(info)
# print('----------------')
# info = str(info).replace(' ','').replace('\n','')
# print(info)

# 中图网 热销榜书籍信息
'''
写爬虫可能会考虑的问题：
    1、尽量使用属性的方式定位标签，减少出错的次数。
    2、写爬虫程序的时候，不能保证每一页，每一个商品的内容一样的。

'''
# Announce the start of the scrape run.
print(
    '*************************************************中图网热销榜书籍信息数据获取开始！****************************************************')

# Request headers captured from a real browser session so the site serves
# the normal listing page (Cookie / Referer / User-Agent are site-specific).
headers = {
    'Cookie': 'BookUser=1%7c2cf91058-a817-475a-96fc-4a31e9911c41%7c1%7c0%7c638436146013202780%7c20180722%7cf63036ee49668189; UserSign=069f073dff21b10b; adtanchu=1; indexCache=yes; ASP.NET_SessionId=y5jb10zbawucjrv3dybfg1uo; Hm_lvt_6993f0ad5f90f4e1a0e6b2d471ca113a=1705396778,1705547370; booklisthistory=5615062; UserUnionId=b03d96e1-0931-4da7-b608-6dbd63986435; UserViewBooks=[5615062]; Hm_lpvt_6993f0ad5f90f4e1a0e6b2d471ca113a=1705560057',
    'Host': 'www.bookschina.com',
    'Referer': 'https://www.bookschina.com/24hour/1_0_34/',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
}

# One accumulator list per scraped column (title, author, publisher,
# star rating, review count, sale price, discount, original price).
(name_list, author_list, publisher_list, star_nums_list,
 comment_list, sell_price_list, discount_list, del_price_list) = ([] for _ in range(8))

# Walk all 34 pages of the 24-hour bestseller list and collect one row
# per book into the module-level accumulator lists.
for page in range(1, 35):
    print(f"==================================正在获取第{page}页的数据===================================")
    url = f'https://www.bookschina.com/24hour/1_0_{page}/'
    # Fetch the page. `timeout` keeps the scraper from hanging forever on a
    # stalled connection; `raise_for_status` stops us from parsing an error page.
    res = requests.get(url=url, headers=headers, timeout=15)
    res.raise_for_status()
    res.encoding = res.apparent_encoding
    html1 = etree.HTML(res.text)
    # Each <li> under this container is one book entry.
    li_list = html1.xpath('//div[@id="container"]/div/div[2]/div[2]/ul/li')
    for li in li_list:
        infos = li.xpath('./div[@class="infor"]')[0]
        name = infos.xpath('./h2[@class="name"]/a/text()')[0]
        name_list.append(name)

        # Author and publisher are sometimes missing; fall back to a placeholder.
        authors = infos.xpath('./div[@class="author"]/a/text()')
        author = authors[0] if authors else '--暂无作者--'
        author_list.append(author)

        publishers = infos.xpath('./div[@class="publisher"]/a/text()')
        publisher = publishers[0] if publishers else '--暂无出版社--'
        publisher_list.append(publisher)

        # Star rating = count of full-star icons plus an optional half star.
        # xpath() on an element always returns a list (possibly empty), so the
        # original bare `except:` blocks here were unreachable and have been removed.
        one_star_nums = len(infos.xpath('./div[@class="startWrap"]/i[@class="one"]'))
        half_nums = len(infos.xpath('./div[@class="startWrap"]/i[@class="half"]'))
        star_nums = one_star_nums + 0.5 if half_nums else one_star_nums
        star_nums_list.append(star_nums)

        comment = infos.xpath('./div[@class="startWrap"]/a/text()')[0]
        comment_list.append(comment)

        sell_price = infos.xpath('./div[@class="priceWrap"]/span[1]/text()')[0]
        sell_price_list.append(sell_price)

        # Discount text looks like "(3.2折)"; strip the surrounding parentheses.
        discount = str(infos.xpath('./div[@class="priceWrap"]/span[2]/text()')[0])[1:-1]
        discount_list.append(discount)

        del_price = infos.xpath('./div[@class="priceWrap"]/del/text()')[0]
        del_price_list.append(del_price)
        print(
            f'书名:{name}, 作者:{author}, 出版社:{publisher}, 推荐指数:{star_nums}颗星, '
            f'评论数:{comment}, 售价:{sell_price}, 折扣:{discount}, 原价:{del_price}')
        print('------------')

print(
    '*************************************************中图网热销榜书籍信息数据获取结束！正在保存到csv文件中....****************************************************')

time.sleep(3)
# Assemble the scraped columns into a dict keyed by the CSV header names.
dict1 = {
    '书名': name_list,
    '作者': author_list,
    '出版社': publisher_list,
    '推荐指数': star_nums_list,
    '评论数': comment_list,
    '售价': sell_price_list,
    '折扣': discount_list,
    '原价': del_price_list
}

# Convert to a DataFrame and write it out, one CSV file per run date.
df1 = pd.DataFrame(dict1)
day_time = datetime.now().strftime('%Y-%m-%d')
# to_csv does not create missing directories — ensure data/ exists first,
# otherwise the whole scrape is lost to a FileNotFoundError here.
os.makedirs('data', exist_ok=True)
df1.to_csv(f'data/中图网热销榜数据-{day_time}.csv', index=False)
print(
    '*************************************************数据保存完毕！在data/中图网热销榜数据-时间.csv中！****************************************************')

