#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import re
import shutil
from urllib.request import urlopen
from urllib.request import urlretrieve
from urllib.request import URLError, ContentTooShortError

from bs4 import BeautifulSoup
from bs4 import UnicodeDammit

import config

# Index URL of the targeted photo album (its first page); URLs of the
# following pages are derived by inserting "_<n>" before ".htm" (see main()).
Url = "http://www.umei.cc/p/gaoqing/rihan/20160721190518.htm"

def moveFile(fname, tagetDir):
    """Move *fname* from the current directory into ./*tagetDir*/*fname*.

    Replaces the previous ``os.system("mv ...")`` call, which broke on
    file names containing spaces or shell metacharacters and silently
    ignored failures; ``shutil.move`` raises on error and also works
    across filesystems.
    """
    shutil.move(fname, os.path.join(".", tagetDir, fname))

def saveImg(bsObj, dirname):
    """Download every image found in a ``div.img_box`` of *bsObj* into *dirname*.

    Files are named sequentially ("<config.INDEX>.jpg"); the global counter
    config.INDEX is incremented only after a successful download.
    """
    imgBoxes = bsObj.findAll("div", {"class": "img_box"})
    for singleImgBox in imgBoxes:
        img = singleImgBox.find("img")
        # Skip boxes without an <img> tag or without a src attribute
        # instead of crashing with AttributeError/KeyError.
        if img is None or "src" not in img.attrs:
            continue
        imgUrl = img.attrs["src"]  # the image's download URL
        filename = str(config.INDEX) + ".jpg"
        try:
            urlretrieve(imgUrl, filename)  # download the image to disk
        except (ContentTooShortError, URLError, OSError):
            # The original only caught ContentTooShortError, so a plain
            # network failure (URLError) aborted the whole album.
            print("DOWNLOADING ERROR")
            continue
        moveFile(filename, dirname)  # archive into the album directory
        print(imgUrl)
        config.INDEX += 1

def main(Url):
    """Crawl one photo album starting at *Url*.

    Downloads the images on the first page, reads the album's page count
    from the bottom pagination bar, then crawls pages 2..N (whose URLs
    replace ".htm" with "_<n>.htm") into a directory named after the
    album title.
    """
    config.INDEX = 1
    # Fetch the first page of the album.
    try:
        html = urlopen(Url)
    except URLError as e:
        print("url open ERROR!,reason is %s" % e.reason)
        # The original fell through here and crashed with a NameError on
        # the unbound `html`; nothing more can be done without page 1.
        return

    bsObj = BeautifulSoup(html, "html.parser")  # parsed first page

    # Get the total page count from the bottom pagination bar.
    totalPageNum = 1  # safe default: was left unbound when the lookup failed
    try:
        pageLinks = bsObj.find("div", {"id": "pagination"}).findAll("div", {"class": "pages"})[1].findAll("a")
        totalPageNum = len(pageLinks) - 1
        print("TOTAL PAGES OF THIS ALBUM: " + str(totalPageNum))
    except (AttributeError, IndexError):
        # IndexError (from the [1] lookup) was uncaught before and
        # aborted the crawl entirely.
        print("CAN NOT GET TOTAL PAGES NUMBER!")

    # Derive a filesystem-safe directory name from the album title,
    # create the directory, and crawl the first page's images.
    dirName = "album"  # fallback: was unbound in the loop below on failure
    try:
        name = bsObj.find("div", {"class": "pageheader entrypage"}).find("h2").get_text()
        print(name)
        # Keep only ASCII-safe characters from the title.
        filtered = "".join(
            char for char in name
            if re.match(r"[a-zA-Z0-9\s\[\]\(\)\.\-\_]", char)
        )
        if filtered:
            dirName = filtered
        print(dirName)
        dirName = dirName.replace('  ', "_").replace(' ', "_")
        print(dirName)
        if not os.path.exists(dirName):
            try:
                # os.makedirs actually raises on failure; the previous
                # os.system("mkdir ...") never did, so its bare except
                # was dead code.
                os.makedirs(dirName)
            except OSError:
                print("MAKE DIRECTORY FAIL!")
        saveImg(bsObj, dirName)
    except AttributeError:
        print("can not make directory!")    # title element not found

    # Crawl the remaining pages: page n lives at <base>_<n>.htm.
    for pageNumber in range(2, totalPageNum + 1):
        try:
            html = urlopen(Url[0:len(Url) - 4] + "_" + str(pageNumber) + ".htm")
        except URLError as e:
            print("url open ERROR!,reason is %s" % e.reason)
            continue
        bsObj = BeautifulSoup(html, "html.parser")
        try:
            saveImg(bsObj, dirName)
        except AttributeError:
            print(str(pageNumber) + ": ERROR!")
            continue    # keep going with the next page on error

    # output notation of completion
    print("COMPLETE!")

if __name__ == '__main__':
    # Script entry point: crawl the album at the module-level Url constant.
    main(Url)