# coding=utf-8

# from selenium import webdriver
# import csv
# from selenium.webdriver.chrome.options import Options
# from selenium.common.exceptions import NoSuchElementException
# import datetime

import requests
from bs4 import BeautifulSoup 
# import re
from urllib.parse import urljoin
from time import sleep
from urllib.request import urlretrieve
from time import time
import os

# visited_urls = []
# unvisited_urls = ["http://xmissy.nl/pictures/"]
# 
# 
# def get_cur_url():
#     while True:
#         if len(unvisited_urls)==0:
#             return None
#         
#         url = unvisited_urls.pop()
#         
#         if url in visited_urls:
#             continue
#         
#         visited_urls.append(url)
#         return url

base_url = "http://xmissy.nl/pictures/"
# base_url = "http://xmissy.nl/pictures/?page=81"
headers = {"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}

base_dir = "I:\\test\\"


def sub_get_img(url):
    """Download every image of one gallery page into a subdirectory of base_dir.

    url -- absolute URL or site-relative link (e.g. "/item/...") to the
           gallery page; it is resolved against the module-level base_url.

    Side effects: creates a directory named after the gallery title and
    writes one numbered .jpg file per <img class="imageborder"> found.
    """
    real_url = urljoin(base_url, url)
    print(real_url)

    response = requests.get(real_url, headers=headers)
    sleep(0.5)  # be polite to the server between requests
    content = response.content.decode("utf-8", "ignore")

    soup = BeautifulSoup(content, "html.parser")

    # Gallery name: use the unique page title; fall back to a timestamp
    # when the page layout is unexpected.
    name_h1 = soup.select("h1#pagetitle")
    if len(name_h1) == 1:
        name = name_h1[0].text.strip()
    else:
        name = str(time())

    # Strip characters that are illegal in Windows directory names;
    # fall back to a timestamp if nothing printable is left.
    name = "".join(c for c in name if c not in '\\/:*?"<>|') or str(time())

    target_dir = os.path.join(base_dir, name)
    # exist_ok: don't crash when re-running on an already-fetched gallery.
    os.makedirs(target_dir, exist_ok=True)

    # Download the images, numbering them 0.jpg, 1.jpg, ...
    for index, element in enumerate(soup.select("img.imageborder")):
        urlretrieve(element['src'], os.path.join(target_dir, '%s.jpg' % index))
                
                
if __name__ == '__main__':

    # Crawl the paginated listing: download every gallery on the current
    # page, then follow the "next page" link until there is none.
    while True:
        # 1. Visit every gallery link on the current listing page.
        print(base_url)

        response = requests.get(base_url, headers=headers)
        sleep(0.5)  # throttle requests
        content = response.content.decode("utf-8", "ignore")

        soup = BeautifulSoup(content, "html.parser")

        for element in soup.select('div.itemdata > a.itemlink'):
            # Only follow internal gallery items, not external links.
            if element['href'].startswith("/item/"):
                sub_get_img(element['href'])

        # 2. Move on to the next listing page; stop when the pager has no
        #    single "next" anchor (last page reached).
        next_a = soup.select('li#nextpage a')
        if len(next_a) != 1:
            print("over break")
            break

        base_url = next_a[0]['href']
