import json
import os
import shutil
import urllib.parse
import urllib.request

import jsonpath

# Interactive downloader: searches duitang.com for a keyword and saves the
# matching images under D:/堆糖网/<keyword>/.  Loops forever, one search per
# iteration (exit with Ctrl+C).
pic = 0  # running counter across all downloads; keeps filenames unique
print("所有图片将保存在   D:\堆糖网   ")
pages = 0
while True:
    keyword = input("请输入要搜索下载的图片：")
    if not os.path.exists('D:/堆糖网'):
        os.mkdir('D:/堆糖网')

    # Percent-encode the keyword for the query string (keyword may be CJK).
    keywords = urllib.parse.quote(keyword)
    pages = int(input("请问要下载多少页："))
    for page in range(1, pages + 1):
        # The API paginates via after_id in steps of 24 items per page; the
        # trailing _= parameter mimics the site's cache-busting timestamp.
        url = f'https://www.duitang.com/napi/blogv2/list/by_search/?kw={keywords}&after_id={(page - 1) * 24}&type=feed' \
              '&include_fields=top_comments%2Cis_root%2Csource_link%2Citem%2Cbuyable%2Croot_id%2Cstatus%2Clike_count' \
              f'%2Clike_id%2Csender%2Calbum%2Creply_count%2Cfavorite_blog_id&_type=&_=16731739780{65 + page} '
        headers = {
            "User-Agent": "Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36(KHTML,"
                          "likeGecko)Chrome/101.0.0.0Safari/537.36 "
        }
        # Build the request with a browser User-Agent so the API answers.
        request = urllib.request.Request(url=url, headers=headers)
        response = urllib.request.urlopen(request)
        html = response.read().decode('utf-8')

        if not os.path.exists(f'D:/堆糖网/{keyword}'):
            os.mkdir(f'D:/堆糖网/{keyword}')

        # Keep a copy of the raw API response on disk for this page.
        files = f'D:/堆糖网/{keyword}/pixiv-{pic}.json'
        with open(files, 'w', encoding='utf-8') as f:
            f.write(html)

        # Parse the JSON we already hold in memory instead of re-opening the
        # file we just wrote (the original leaked an unclosed file handle).
        obj = json.loads(html)
        img_list = jsonpath.jsonpath(obj, '$..path')   # image URLs
        name_list = jsonpath.jsonpath(obj, '$..msg')   # captions -> filenames
        no_name = jsonpath.jsonpath(obj, '$..message') # API error message, if any
        # jsonpath returns False (not []) on no match, so also guard empty
        # result lists here to avoid a TypeError in the loop below.
        if no_name or not img_list or not name_list:
            print("没有搜到你想要的内容，请换个词试试！")
            shutil.rmtree(f'D:/堆糖网/{keyword}')
            continue

        # Download each image; zip also guards against a length mismatch
        # between the two jsonpath result lists.
        for src, name in zip(img_list, name_list):
            suffix = src.split('.')[-1]
            # NOTE(review): `name` comes straight from the API and may contain
            # characters that are invalid in Windows filenames — consider
            # sanitizing before use.
            target = f'D:/堆糖网/{keyword}/{name}-{pic + 1}.{suffix}'
            try:
                urllib.request.urlretrieve(url=src, filename=target)
                print(f'{name}_{pic + 1}---下载成功！')
            except Exception:
                # Best-effort per image: report and move on, but no longer
                # swallow KeyboardInterrupt/SystemExit like the old bare except.
                print(f'{name}_{pic + 1}---下载失败！')
            pic += 1

        print(f'*********************当前在{page}页*********************')
        os.remove(files)  # raw JSON dump no longer needed once images saved
    print("完成！")
