# coding=utf-8

import requests
from bs4 import BeautifulSoup 
from urllib.parse import urljoin
from time import sleep
from urllib.request import urlretrieve
from time import time
import threading
import queue
from pathlib import Path

# Root listing page of the gallery site being scraped.
base_url = "https://www.changshifang.com/zgmnx/"
# base_url = "http://xmissy.nl/pictures/?page=81"
# Local download root (Windows path); created on demand by list_2_each.
base_dir = "I:\\test0819-12\\"

# Desktop Chrome user-agent so the site serves regular pages.
headers = {"user-agent": "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.23 Safari/537.36"}

# Bounded hand-off queues between pipeline stages (provide backpressure).
q_list = queue.Queue(maxsize=10)    # album tasks: listing crawler -> album crawler
q_each = queue.Queue(maxsize=100)   # image tasks: album crawler -> downloaders


class List:
    """An album task: the album page URL together with its display title."""

    def __init__(self, url, title):
        """Remember the album page *url* and its *title*."""
        self.url, self.title = url, title

        
class Each:
    """A download task: an image URL and the local file path to save it to."""

    def __init__(self, url, path):
        """Remember the image *url* and its destination *path*."""
        self.url, self.path = url, path


class Statistics:
    """Thread-safe progress counters shared by all pipeline workers."""

    def __init__(self):
        self.album = 0  # albums discovered on listing pages
        self.task = 0   # images queued for download
        self.done = 0   # images successfully downloaded
        self.lock = threading.Lock()  # guards all three counters

    def __str__(self):
        return "album=%s task=%s done=%s" % (self.album, self.task, self.done)

    def add_album(self):
        """Atomically count one discovered album."""
        with self.lock:  # context manager releases even on exception
            self.album += 1

    def add_task(self):
        """Atomically count one queued image."""
        with self.lock:
            self.task += 1

    def add_done(self):
        """Atomically count one finished download."""
        with self.lock:
            self.done += 1


# Single shared, thread-safe counter object used by every worker thread.
statistics = Statistics()


_DEBUG = False  # flip to True for verbose trace output


def p(str1):
    """Debug print helper: emit *str1* only when _DEBUG is enabled.

    Replaces the original ``if 0:`` magic-literal toggle with a named
    module-level flag; behavior is unchanged (tracing stays off).
    """
    if _DEBUG:
        print("%s" % str1)

    
def main_2_list():
    """Producer: walk the site's listing pages and enqueue album tasks.

    For every album link found, puts a List(url, title) onto q_list and
    bumps statistics.album. Follows the "next page" link until no more
    pages exist, then returns.

    Fixes: the original retry loop (`for i in range(0, 3)` with
    `if i < 3`) could never reach its terminating branch, so when the
    last page had no next-link the outer loop re-fetched the same page
    forever. `select()` on an already-parsed soup is deterministic, so
    retrying it was pointless — an explicit empty check now ends the
    crawl cleanly.
    """
    cur_url = base_url

    while True:
        # 1. Harvest every album link on the current listing page.
        p("cur_url: " + cur_url)

        response = requests.get(cur_url, headers=headers)
        sleep(0.5)  # polite crawl delay
        content = response.content.decode("gbk", "ignore")

        soup = BeautifulSoup(content, "html.parser")

        for element in soup.select('ul.phlistul > li > div.phavcon > div.vt_rq > h3 > a'):
            real_url = urljoin(cur_url, element['href'])
            title = element.get_text()
            q_list.put(List(real_url, title))
            statistics.add_album()

        # 2. Advance to the next listing page; stop when there is none.
        next_links = soup.select('div.pagebox > li > a')
        if not next_links:
            print("over break")
            return
        cur_url = urljoin(cur_url, next_links[0]['href'])


def list_2_each_next_page(soup, cur_url):
    """Return the absolute URL of an album's next page, or None if done.

    soup:    parsed album page (anything with a CSS ``select`` method).
    cur_url: URL the page was fetched from, used to absolutize the link.

    Fixes: the original retried a deterministic ``select()`` three times
    (its `if i < 3` branch was always true, so the error path was
    unreachable), and the error print had three arguments for two ``%s``
    placeholders, which would have raised TypeError had it ever run.
    Callers only test the result for truthiness, so returning None
    directly when no next-link exists is fully compatible.
    """
    nxt_a = soup.select('div.art_page > span.next > a')
    if not nxt_a:
        return None  # no "next" link: this was the album's last page
    return urljoin(cur_url, nxt_a[0]['href'])

    
def list_2_each():
    """Worker: consume album tasks from q_list, crawl every page of each
    album, and enqueue one download task per image onto q_each.

    Runs forever, blocking on q_list.get() between albums.
    """
    global headers
    global q_list
    global q_each
    global statistics
    global base_dir

    down_dir = base_dir
    # Ensure the download root exists before creating album directories.
    dir1 = Path(down_dir)
    if not dir1.exists():
        dir1.mkdir()

    # Run forever: block until the listing crawler hands over an album.
    while True:
        list1 = q_list.get()
        cur_url = list1.url

        # Album directory name: timestamp prefix keeps names unique even
        # when two albums share the same title.
        name = "%s-%s" % (time(), list1.title)
        dir1 = Path(down_dir + name)
        if not dir1.exists():
            dir1.mkdir()

        x = 0  # per-album image counter, used as the saved file name

        # Walk the album's pages one by one.
        while True:
            p("list_2_each get:" + cur_url)
            response = requests.get(cur_url, headers=headers)
            sleep(1)  # polite delay between page fetches
            # NOTE(review): decoded as utf-8 here but gbk in main_2_list —
            # confirm which encoding the site's detail pages actually use.
            content = response.content.decode("utf-8", "ignore")

            soup = BeautifulSoup(content, "html.parser")

            for element in soup.select("a > img"):
                # TODO: maybe re-validate the src with a regex?
                # Skip images that are presumably site chrome (watermark /
                # footer logo), judging by their file names.
                if element['src'].endswith("csf.jpg") or element['src'].endswith("ft_logo.jpg"):
                    continue
                real_url = urljoin(cur_url, element['src'])
                each1 = Each(real_url, '%s%s\\%s.jpg' % (down_dir , name , x))
                q_each.put(each1)
                statistics.add_task()
                p("list_2_each PUT:%s:%s" % (each1.url, each1.path))
                x += 1

            # Advance to the next page; None means the album is finished.
            cur_url = list_2_each_next_page(soup, cur_url)
            if not cur_url:
                break


def each_down():
    """Consumer: download each queued image to disk, retrying on failure.

    Blocks on q_each.get() forever; each image gets up to 3 download
    attempts (1 s apart) before the failure is reported and the task is
    dropped.

    Fixes: the original's `if i < 3` was always true inside
    `range(0, 3)`, so the error branch was unreachable and the final
    failure slept instead of reporting; the error print also passed the
    URL as a second print argument (printing a tuple) rather than
    formatting it into the message.
    """
    while True:
        each1 = q_each.get()
        p("each_down GET:%s:%s" % (each1.url, each1.path))
        for attempt in range(3):
            try:
                urlretrieve(each1.url, each1.path)
                statistics.add_done()
                break
            except (OSError, ValueError):  # URLError subclasses OSError
                if attempt < 2:
                    sleep(1)  # back off, then retry
                else:
                    print("error: urlretrieve(%s)" % each1.url)

        p("DONE %s->%s" % (each1.url, each1.path))


if __name__ == '__main__':

    # Build the download pipeline's worker threads.
    threads = []

    # 1. Listing pages -> album queue (single producer).
    threads.append(threading.Thread(target=main_2_list))

    # 2. Album pages -> image queue (single crawler).
    threads.append(threading.Thread(target=list_2_each))

    # 3. Image downloaders (20 parallel consumers, I/O-bound).
    for _ in range(20):
        threads.append(threading.Thread(target=each_down))

    # Start everything.
    for t in threads:
        t.start()

    # Monitor loop. The workers run forever, so this never returns; the
    # original's trailing join() loop after this point was unreachable
    # dead code and has been removed.
    while True:
        print("q_list=%s q_each=%s %s" % (q_list.qsize(), q_each.qsize(), statistics))
        sleep(5)