import urllib.request as ur
import os
import time
import random

def url_open(url):
    """Fetch *url* through a randomly chosen HTTP proxy and return the raw bytes.

    A browser-like User-Agent header is attached so the target site does not
    reject the request as a bot, and one proxy from the hard-coded pool is
    picked at random per call to spread requests across addresses.

    NOTE(review): ``install_opener`` sets the proxy process-wide, so the last
    call's proxy also affects any other ``urlopen`` in this process.
    """
    req = ur.Request(url)
    req.add_header('user-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0')

    # BUG FIX: '75.109.208.168：54342' used a full-width colon (U+FF1A), which
    # made that proxy entry malformed; replaced with an ASCII ':'.
    proxies = ['35.184.109.90:3128', '198.98.86.111:53281', '159.89.229.66:8080', '75.109.208.168:54342', '159.89.184.107:80', '50.93.200.237:2018']
    proxy = random.choice(proxies)

    proxy_support = ur.ProxyHandler({'http': proxy})
    opener = ur.build_opener(proxy_support)
    ur.install_opener(opener)

    response = ur.urlopen(req)
    html = response.read()

    return html

def get_page(url):
    """Scrape *url* and return the newest album number as a string.

    Looks for the first CDN anchor of the form
    ``<a href="https://pht.wangyicdn.com/am/<number>/...`` in the page and
    returns the ``<number>`` part (also echoed to stdout for progress).
    """
    html = url_open(url).decode('utf-8')

    # Derive the slice start from the marker itself instead of the former
    # hard-coded magic offset 38 (which was just len(marker)).
    marker = '<a href="https://pht.wangyicdn.com/am/'
    a = html.find(marker) + len(marker)
    b = html.find('/', a)

    print(html[a:b])

    return html[a:b]

def find_imgs(url):
    """Collect and return the ``.jpg`` image URLs found on the page at *url*.

    Scans the HTML for every ``img src=`` occurrence, extracts the address up
    to and including the ``.jpg`` extension, prints each one, and returns the
    list of addresses (may be empty).
    """
    # BUG FIX: the decoded page was never assigned, so every use of `html`
    # below raised NameError at runtime.
    html = url_open(url).decode('utf-8')
    img_address = []
    a = html.find('img src=')
    while a != -1:
        # Look for the extension within a bounded window so a missing '.jpg'
        # does not match one belonging to a much later tag.
        b = html.find('.jpg', a, a + 255)

        if b != -1:
            # Skip the 'img src="...' prefix (30 chars) and keep through '.jpg'.
            img_address.append(html[a + 30:b + 4])
        else:
            # No extension found near this tag; resume just past 'img src='.
            b = a + 9

        a = html.find('img src=', b)
    for each in img_address:
        print(each)

    return img_address

def save_imgs(folder, img_address):
    """Download each URL in *img_address* into the current working directory.

    The file name is the last path segment of each URL. ``folder`` is accepted
    for interface compatibility but not used here — the caller is expected to
    have already changed into the target directory.
    """
    for img_url in img_address:
        target_name = img_url.split('/')[-1]
        with open(target_name, 'wb') as out_file:
            out_file.write(url_open(img_url))


def download_mm(folder = 'OOXX', pages = 10):
    """Download images from the most recent *pages* albums into *folder*.

    Creates *folder* (if needed), chdirs into it, discovers the newest album
    number via :func:`get_page`, then walks backwards one album per page.
    """
    # BUG FIX: os.mkdir raised FileExistsError on a rerun; makedirs with
    # exist_ok=True makes the script idempotent with respect to the folder.
    os.makedirs(folder, exist_ok=True)
    os.chdir(folder)

    url = "https://www.cku01.com/photo/am/"
    page_num = int(get_page(url))

    for i in range(pages):
        # BUG FIX: the original did `page_num -= i`, which subtracts a
        # cumulative amount (0, 1, 3, 6, ...) and skips albums; step back
        # exactly one album per iteration instead.
        page_url = url + str(page_num - i) + '.html'
        img_address = find_imgs(page_url)
        # BUG FIX: save_imgs requires (folder, img_address); the folder
        # argument was missing, so this call raised TypeError.
        save_imgs(folder, img_address)

if __name__ == '__main__':
    # Script entry point: run the downloader with its default folder/page count.
    download_mm()