# coding=utf-8

import threading
from pathlib import Path
from queue import Empty, Queue
from time import sleep, time
from urllib.parse import urljoin
from urllib.request import urlretrieve

import requests
from bs4 import BeautifulSoup

# Listing start page; the commented-out variant resumes the crawl from a specific page.
target_url = "http://xmissy.nl/pictures/"
# target_url = "http://xmissy.nl/pictures/?page=81"
# Windows directory under which one sub-directory per album is created.
target_path = r"I:\test0819-12"


class List:
    """A discovered album: the relative link to its page and its display title."""

    def __init__(self, url, title):
        self.url = url      # relative href of the album page (e.g. "/item/...")
        self.title = title  # album title, later used to name the download directory

        
class Each:
    """A single image download task: source URL and local destination path."""

    def __init__(self, url, path):
        self.url = url    # absolute URL of the image
        self.path = path  # local file path to save the image to


class Statistics:
    """Thread-safe progress counters shared by all pipeline threads.

    Counts albums discovered, image tasks queued, and images downloaded.
    """

    def __init__(self):
        self.album = 0  # album pages discovered by main_2_list
        self.task = 0   # image download tasks queued by list_2_each
        self.done = 0   # images successfully downloaded by each_down
        self.lock = threading.Lock()  # guards the counters: += is read-modify-write, not atomic

    def __str__(self):
        return "album=%s task=%s done=%s" % (self.album, self.task, self.done)

    def add_album(self):
        # Increment under the lock: these counters are bumped from many
        # threads concurrently (the lock existed before but the
        # acquire/release calls were commented out).
        with self.lock:
            self.album += 1

    def add_task(self):
        with self.lock:
            self.task += 1

    def add_done(self):
        with self.lock:
            self.done += 1


# Process-wide counters, updated from every worker thread.
statistics = Statistics()

# Browser-like User-Agent so the site serves normal pages.
headers = {"user-agent": "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.23 Safari/537.36"}

# Cooperative shutdown flag: worker loops run while this is True.
flag_run = True


def p(str1):
    """Debug print helper; tracing is currently disabled.

    Flip the guard to True to re-enable verbose per-URL output
    (the original used the non-idiomatic constant `if 0:`).
    """
    if False:  # debug tracing switched off
        print("%s" % str1)

    
def main_2_list(q_list):
    """Producer thread: walk the paginated listing and queue one List per album.

    Fetches each listing page, queues every "/item/..." album link, then
    follows the "next page" link.  Stops when no next page exists (last
    listing page) or when flag_run is cleared by the main thread.

    q_list -- bounded Queue of List objects consumed by list_2_each.
    """
    global target_url
    global headers
    global statistics

    cur_url = target_url

    while flag_run:
        # 1. Queue every album link found on the current listing page.
        p("cur_url: " + cur_url)

        response = requests.get(cur_url, headers=headers)
        sleep(0.5)  # be polite to the server
        content = response.content.decode("utf-8", "ignore")

        soup = BeautifulSoup(content, "html.parser")

        for element in soup.select('div.itemdata > a.itemlink'):
            if element['href'].startswith("/item/"):
                q_list.put(List(element['href'], element['title']))
                statistics.add_album()

        # 2. Locate the next-page link, retrying up to 3 times.
        # The original tested `i < 3` inside `range(3)` -- always true --
        # so the give-up branch was unreachable and the loop re-fetched
        # the same page forever once the last listing page was reached.
        next_url = None
        for attempt in range(3):
            try:
                next_url = soup.select('li#nextpage a')[0]['href']
                break
            except (IndexError, KeyError):
                if attempt < 2:
                    sleep(1)
        if next_url is None:
            print("over break")
            break
        cur_url = next_url

    print("info: main_2_list OVER")

 
def list_2_each(q_list, q_each):
    """Middle-stage thread: fetch each album page and queue its images.

    Pulls List objects from q_list, fetches the album page, creates a
    per-album directory under target_path, and queues one Each(url, path)
    task per image.  Exits after 30s with no incoming work, or when
    flag_run is cleared.

    q_list -- Queue of List objects produced by main_2_list.
    q_each -- bounded Queue of Each objects consumed by each_down workers.
    """
    global headers
    global statistics
    global target_path

    base_dir = Path(target_path)
    # parents/exist_ok avoid both the exists() race and missing-parent errors.
    base_dir.mkdir(parents=True, exist_ok=True)

    while flag_run:
        try:
            list1 = q_list.get(timeout=30)
        except Empty:  # narrowed from a bare except
            print("info: list_2_each 30s break")
            break

        cur_url = urljoin(target_url, list1.url)
        p(cur_url)

        response = requests.get(cur_url, headers=headers)
        sleep(0.5)  # be polite to the server
        content = response.content.decode("utf-8", "ignore")

        soup = BeautifulSoup(content, "html.parser")

        # Album directory name: timestamp prefix keeps names unique even
        # when two albums share the same title.
        name = "%s-%s" % (time(), list1.title)
        album_dir = base_dir / name
        album_dir.mkdir(parents=True, exist_ok=True)

        for x, element in enumerate(soup.select("img.imageborder")):
            each1 = Each(element['src'], str(album_dir / ("%s.jpg" % x)))
            q_each.put(each1)
            statistics.add_task()

    print("info: list_2_each OVER")


def each_down(q_each):
    """Download-worker thread: save queued images, retrying up to 3 times each.

    Pulls Each tasks from q_each and downloads each URL to its target
    path.  Exits after 30s with no work, or when flag_run is cleared.

    q_each -- Queue of Each objects produced by list_2_each.
    """
    global statistics

    while flag_run:
        try:
            each1 = q_each.get(timeout=30)
        except Empty:  # narrowed from a bare except
            print("info: each_down 30s break")
            break

        p("each_down GET:%s:%s" % (each1.url, each1.path))
        for attempt in range(3):
            try:
                urlretrieve(each1.url, each1.path)
                statistics.add_done()
                break
            except Exception:
                # Transient network/file error: wait and retry unless this
                # was the final attempt.  (The original tested `i < 3`,
                # always true, so the error branch was unreachable; it also
                # passed the URL as a second print() argument instead of
                # formatting it into the message.)
                if attempt < 2:
                    sleep(1)
                else:
                    print("error: urlretrieve(%s)" % each1.url)

        p("DONE %s->%s" % (each1.url, each1.path))

    print("info: each_down OVER")


if __name__ == '__main__':

    threads = []
    q_list = Queue(maxsize=10)   # album-page links (bounded: throttles the crawler)
    q_each = Queue(maxsize=100)  # individual image download tasks

    # 1. Listing pages -> album links (single producer)
    threads.append(threading.Thread(target=main_2_list, args=(q_list,)))

    # 2. Album links -> per-image download tasks (single middle stage)
    threads.append(threading.Thread(target=list_2_each, args=(q_list, q_each,)))

    # 3. Image download workers (pool of 20)
    for _ in range(20):
        threads.append(threading.Thread(target=each_down, args=(q_each,)))

    # Start the whole pipeline.
    for t in threads:
        t.start()

    # Monitor loop: shut down once both queues have stayed empty for 4
    # consecutive 5-second checks.  The idle counter resets whenever work
    # reappears -- the original never reset it, so three early transient
    # empty moments could end the run while pages were still being fetched.
    idle_checks = 0
    while True:
        sleep(5)
        print("q_list=%s q_each=%s %s" % (q_list.qsize(), q_each.qsize(), statistics))
        if q_list.qsize() == 0 and q_each.qsize() == 0:
            if idle_checks < 3:
                idle_checks += 1
            else:
                break
        else:
            idle_checks = 0

    # Shut down: clear the run flag so worker loops exit, then wait for them.
    flag_run = False
    for t in threads:
        t.join()
    print("*** Over ***")
