# -*- coding: utf-8 -*-
import multiprocessing
import os.path
import requests
from lxml import etree

# Scrape the images from the first 10 pages of http://wanimal1983.org/


# Destination directory for downloaded images (Windows path; created in __main__).
root_dir = r'D:\work\project\spiders\wanimal1983\result'

def print_process(n, max_count, text):
    """Overwrite the current console line with download progress (n of max_count)."""
    message = '\rdownloading(%d/%d) %s ...' % (n, max_count, text)
    print(message, end='')

def do_one_page(data):
    """Pool worker: download one image URL into ``root_dir``.

    ``data`` is a ``(img_url, lock, counter, max_count)`` tuple; ``lock`` and
    ``counter`` are Manager proxies shared across worker processes.  Downloads
    are best-effort: a failed URL is skipped, never fatal to the pool.
    """
    img_url, lock, counter, max_count = data
    # Bump the shared counter under the lock so progress numbers stay monotonic.
    with lock:
        counter.value += 1

    print_process(counter.value, max_count, img_url)

    fullname = os.path.join(root_dir, os.path.basename(img_url))
    # Skip files already downloaded; zero-byte files count as failed attempts.
    if os.path.exists(fullname) and os.path.getsize(fullname) != 0:
        return
    try:
        resp = requests.get(img_url, timeout=10)
        resp.raise_for_status()  # don't save 404/50x error bodies as images
        with open(fullname, 'wb') as fp:
            fp.write(resp.content)
    except (requests.RequestException, OSError):
        # Best-effort: drop any partial file so the size check retries it
        # on the next run, then move on to the next URL.
        try:
            os.remove(fullname)
        except OSError:
            pass


def get_all_page_url(page_count=10):
    """Collect image URLs from the first *page_count* listing pages.

    Scrapes ``http://wanimal1983.org/page/<i>`` for ``i`` in 1..page_count and
    returns a list of image ``src`` URLs found in the "media" and
    "photoset-grid" blocks, in page order.
    """
    result = []
    for i in range(1, page_count + 1):
        url = 'http://wanimal1983.org/page/%d' % i
        # Timeout so one unresponsive page cannot hang the whole crawl.
        r = requests.get(url, timeout=10)
        tree = etree.HTML(r.text)
        result.extend(tree.xpath(
            '//div[@class="media"]/a/img/@src | //div[@class="photoset-grid"]/img/@src'))
    return result

if __name__ == '__main__':
    # Create the output directory if needed (exist_ok avoids a check/create race).
    os.makedirs(root_dir, exist_ok=True)
    multiprocessing.freeze_support()

    # Manager proxies so the counter and lock are shareable across processes.
    manager = multiprocessing.Manager()
    counter = manager.Value('i', 0)  # running count of attempted downloads
    lock = manager.Lock()

    # Scrape all image URLs first, before spinning up the worker pool.
    urls = get_all_page_url()
    max_count = len(urls)
    inputs = [(u, lock, counter, max_count) for u in urls]

    # Leave one core for the main process, but always keep at least one worker.
    workers = max(multiprocessing.cpu_count() - 1, 1)
    # Context manager guarantees the pool is cleaned up even if map() raises.
    with multiprocessing.Pool(workers) as pool:
        pool.map(do_one_page, inputs)
