# 1.爬取发表情这个网站的热门表情(https://www.fabiaoqing.com/biaoqing)
# 要求
# <1>爬取前10页的热门表情
# <2>交作业的时候提交代码即可
#
# 2.爬取发表情这个网站的情侣表情 (https://www.fabiaoqing.com/bqb/lists/type/liaomei.html)(选做)


import os
import re

import requests
from lxml import etree
def baq(page, out_dir='表情包'):
    """Download every hot meme image from one listing page of fabiaoqing.com.

    Parameters:
        page: 1-based page number of the "hot memes" listing
              (https://www.fabiaoqing.com/biaoqing).
        out_dir: directory the images are written into; it must already
                 exist. Defaults to '表情包' to stay compatible with the
                 original script, which read this from a global.

    Side effects: performs HTTP requests and writes one image file per
    meme found on the page; prints each image title as a progress log.
    """
    url = f"https://www.fabiaoqing.com/biaoqing/lists/page/{page}"
    # Browser-like headers: the site rejects requests without a
    # plausible User-Agent/Referer.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 Edg/109.0.1518.78',
        'Referer': 'https://www.fabiaoqing.com/'}
    html = requests.get(url, headers=headers).text
    # Parse the page and walk every meme container div.
    tree = etree.HTML(html)
    for div in tree.xpath('//div[@class="tagbqppdiv"]'):
        # Images are lazy-loaded: the real URL lives in @data-original.
        srcs = div.xpath('.//img/@data-original')
        titles = div.xpath('.//img/@title')
        if not srcs or not titles:
            # Malformed/ad entry without the expected attributes —
            # skip it instead of crashing with IndexError.
            continue
        src = srcs[0]
        # File extension taken from the image URL (jpg/png/gif).
        end = src.split('.')[-1]
        # Strip characters that are illegal in (Windows) filenames,
        # plus newlines and dots, as the original sanitizer did.
        title = re.sub(r'[?:/<>\\|*"\n.]', '', titles[0])

        image_bytes = requests.get(url=src, headers=headers).content
        print(title)

        with open(os.path.join(out_dir, f'{title}.{end}'), 'wb') as f:
            f.write(image_bytes)

if __name__ == '__main__':
    # Output directory for the downloaded images.
    word = '表情包'
    if not os.path.exists(word):
        os.mkdir(word)
    # The crawl loop must live INSIDE the main guard: in the original it
    # ran at module level, so merely importing this file started the
    # crawl and crashed with NameError (the global `word` is only bound
    # under the guard).
    for page in range(1, 11):
        baq(page)
