"""
三国杀壁纸采集
url：https://www.sanguosha.com/msgs/mWallPaper
author：泉某
date：2023/09/17
"""

import requests
import os
from lxml import etree

# First page of the wallpaper gallery (served as HTML, parsed with XPath).
url_index = 'https://www.sanguosha.com/msgs/mWallPaper'
# Paginated JSON endpoint; %s is substituted with the page number (2..10).
url_page = 'https://www.sanguosha.com/msgs/mWallPaper/cur/%s'
# Desktop-browser User-Agent so the site does not reject the requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36',
}
# Accumulator of wallpaper image URLs, filled by index_deal() / other_page_imgs().
img_urls = []


def other_page_imgs(page=2):
    """Collect image URLs for pages 2 and later via the JSON endpoint.

    Appends every image URL found on the given page to the module-level
    ``img_urls`` list.
    """
    global img_urls
    resp = requests.post(url_page % page, headers=headers)
    for item in resp.json():
        img_url = item.get('imgurl')
        if "http" in img_url:
            img_urls.append(img_url)
        else:
            # From page 10 onward the images are hosted on the site itself and
            # come back without a domain, so the host must be prepended.
            img_urls.append('https://www.sanguosha.com' + img_url)


def index_deal():
    """Collect image URLs from the first gallery page by parsing its HTML.

    Replaces the module-level ``img_urls`` list with the URLs extracted
    from the gallery links.
    """
    global img_urls
    html_text = requests.get(url_index, headers=headers).content.decode()
    doc = etree.HTML(html_text)
    img_urls = doc.xpath('//*[@class="galary_wrapper"]/a/@href')


def download_img(num, url):
    """Download one wallpaper and save it as 三国杀壁纸/<num>.jpg.

    Args:
        num: Sequence number used as the output file name.
        url: Absolute URL of the image to fetch.

    Raises:
        requests.HTTPError: If the server responds with an error status.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + os.mkdir
    # and is a no-op when the folder already exists.
    os.makedirs('三国杀壁纸', exist_ok=True)
    response = requests.get(url, headers=headers)
    # Fail loudly on 4xx/5xx instead of silently saving an error page as a .jpg.
    response.raise_for_status()
    with open('三国杀壁纸/%s.jpg' % num, 'wb') as f:
        f.write(response.content)


if __name__ == '__main__':
    # Page 1 is plain HTML; pages 2 through 10 come from the JSON endpoint.
    index_deal()
    for page in range(2, 11):
        other_page_imgs(page=page)
    # Save every collected image under a 1-based sequence number.
    for num, img_url in enumerate(img_urls, start=1):
        download_img(num, img_url)

