import urllib.request
import hashlib
import base64
import time
import os

from lxml import etree
from pathlib import Path

"""
小甲鱼的方法不能使用了
大神解决了，原博客地址：
https://fanfanblog.cn/157.html
"""


# 处理md5编码问题
def handle_md5(hd_object):
    """Return the hex MD5 digest of a unicode string.

    The string is encoded as UTF-8 before hashing so non-ASCII
    input is handled consistently.
    """
    digest = hashlib.md5()
    digest.update(hd_object.encode('utf-8'))
    return digest.hexdigest()


# 处理base64编码问题
def handle_base64(hd_object):
    """Base64-decode a string and return the result as text.

    The original implementation sliced the ``repr`` of the bytes object
    (``str(b'...')[2:-1]``), which leaks Python escape sequences such as
    ``\\\\`` or ``\\xNN`` into the output for any non-trivial byte.
    Decoding as UTF-8 returns the actual text.
    """
    return base64.b64decode(hd_object).decode('utf-8')


# 解密图片链接
def parse(ig_hs, ct):
    count = 4
    contains = handle_md5(ct)
    ig_hs_copy = ig_hs
    p = handle_md5(contains[0:16])
    m = ig_hs[0:count]
    c = p + handle_md5(p + m)
    n = ig_hs[count:]
    l = handle_base64(n)
    k = []
    for h in range(256):
        k.append(h)
    b = []
    for h in range(256):
        b.append(ord(c[h % len(c)]))
    g = 0
    for h in range(256):
        g = (g + k[h] + b[h]) % 256
        tmp = k[h]
        k[h] = k[g]
        k[g] = tmp
    u = ''
    q = 0
    z = 0
    for h in range(len(l)):
        q = (q + 1) % 256
        z = (z + k[q]) % 256
        tmp = k[q]
        k[q] = k[z]
        k[z] = tmp
        u += chr(ord(l[h]) ^ (k[(k[q] + k[g]) % 256]))
    u = u[26:]
    u = handle_base64(ig_hs_copy)
    return u


# 存储图片到本地
def save_image(url):
    # 字符串前加上 r 是防止字符转义
    # path = r'C:\Users\liaofan\Desktop\OOXX'
    path = '000'
    # 判断文件夹是否存在
    if not Path(path).is_dir():
        # 创建文件夹
        os.mkdir(path)
        print('创建文件夹')

    image_name = url.split('/')[-1]
    folder = os.path.exists(path)
    if not folder:
        os.makedirs(path)
        urllib.request.urlretrieve(url, path + '\\' + image_name)
    else:
        urllib.request.urlretrieve(url, path + '\\' + image_name)


# 获取图片url
def get_url():
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    # 由于所有页面加载的是同一个JS文件，所以这个值暂时固定
    arg = '5HTs9vFpTZjaGnG2M473PomLAGtI37M8'
    # 页面中含有几个这样的JS文件，一部分都注释了，只留一个，看来这个网站更换过JS文件
    # js_url = re.search(r'/\/cdn\.jandan\.net/static/min/[0-9a-zA-Z]+?\.[0-9]+?\.js', req.text, re.S)
    page_num = input('请输入你想要获取的页数：')
    for i in range(1, int(page_num) + 1):
        url = 'http://jandan.net/ooxx/page-' + str(i) + '#comments'
        req = urllib.request.Request(url, headers=headers)
        response = urllib.request.urlopen(req)
        html = etree.HTML(response.read().decode('utf-8'))
        img_hash = html.xpath('//span[@class="img-hash"]/text()')
        print('正在获取第 %d 页图片中（请耐心等候）......' % i)
        for item in img_hash:
            print('http:' + parse(item, arg))
            save_image('http:' + parse(item, arg))
            time.sleep(1)
        print('第 %d 页图片链接获取完毕！' % i)
        time.sleep(2)


# Script entry point: run the scraper only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    get_url()
