from urllib.request import urlopen
from urllib.request import urlretrieve
from urllib.request import URLError, ContentTooShortError
import os
from bs4 import BeautifulSoup
import re

def moveFile(fname, tagetDir):
    """Move file *fname* from the current directory into ./tagetDir/.

    Replaces the old ``os.system("mv ...")`` call, which broke on names
    containing spaces or shell metacharacters and silently ignored
    failures.  ``os.replace`` overwrites an existing destination file,
    matching the previous ``mv`` behaviour, and raises ``OSError`` on
    real failures instead of hiding them.
    """
    os.replace(fname, os.path.join(tagetDir, fname))

def saveImg(imgIndex):
    """Download every image on the current page and archive it.

    Reads the module globals ``bsObj`` (parsed page) and ``dirName``
    (album directory).  Each image is saved as ``<imgIndex>.jpg``,
    ``<imgIndex+1>.jpg``, ... and moved into ``dirName``.

    Returns the next unused index so the caller can keep the numbering
    continuous across pages.  (Previously the increment only updated the
    local parameter and was lost, so every page restarted at the same
    index and overwrote earlier downloads.)
    """
    imgBoxes = bsObj.findAll("div", {"class": "img_box"})
    for singleImgBox in imgBoxes:
        imgUrl = singleImgBox.find("img").attrs["src"]  # image URL inside the box
        print(imgUrl)
        filename = str(imgIndex) + ".jpg"
        try:
            urlretrieve(imgUrl, filename)  # fetch the image to disk
            moveFile(filename, dirName)    # archive it into the album directory
        except ContentTooShortError:
            print("DOWNLOADING ERROR")
        # counter advances even on a failed download, as before, so a
        # retry of the same page keeps consistent numbering
        imgIndex += 1
    return imgIndex

# specify the index url of targeted website
indexUrl = "http://www.umei.cc/p/gaoqing/rihan/20160630185219"

# process the first page of the photo album: fetch it, read the total page
# count, create the album directory and crawl the first page's images
try:
    html = urlopen(indexUrl + ".htm")  # combine into the full first-page URL
except URLError as e:
    print("url open ERROR!,reason is %s" % e.reason)
    # without the first page nothing else can work; stop cleanly instead of
    # crashing later with a NameError on the undefined `html`
    raise SystemExit(1)

bsObj = BeautifulSoup(html, "html.parser")  # parse tree read by saveImg()

# get the total page count of the album from the bottom pagination bar
totalPageNum = 1  # safe default: treat the album as single-page if parsing fails
try:
    pageLinks = bsObj.find("div", {"id": "pagination"}).findAll("div", {"class": "pages"})[1].findAll("a")
    totalPageNum = len(pageLinks) - 1  # last link is "next", not a page number
    print("TOTAL PAGES OF THIS ALBUM: " + str(totalPageNum))
except AttributeError:
    print("CAN NOT GET TOTAL PAGES NUMBER!")

# crawl the images on the first page
index = 1  # running image counter, shared with the remaining-pages loop
try:
    albumName = bsObj.find("div", {"class": "pageheader entrypage"}).find("h2").get_text()
    dirName = albumName
    # os.makedirs replaces the old `os.system("mkdir ...")`, which broke on
    # names containing spaces and never raised, making its except dead code
    try:
        os.makedirs(dirName, exist_ok=True)
    except OSError:
        print("MAKE DIRECTORY FAIL!")
    print(albumName)
    # `or index` keeps compatibility if saveImg returns None instead of
    # the next index
    index = saveImg(index) or index
except AttributeError:
    print("ERROR!")    # album title not found on the page

# crawl the images on the remaining pages with the same procedure
for pageNumber in range(2, totalPageNum + 1):   # pages 2 .. last
    try:
        html = urlopen(indexUrl + "_" + str(pageNumber) + ".htm")
    except URLError as e:
        print("url open ERROR!,reason is %s" % e.reason)
        # skip the unreachable page; the image counter is per-image, so it
        # must NOT be incremented for a failed page
        continue
    bsObj = BeautifulSoup(html, "html.parser")  # saveImg() reads this global
    try:
        # `or index` keeps compatibility if saveImg returns None instead of
        # the next index
        index = saveImg(index) or index
    except AttributeError:
        print(str(pageNumber) + ": ERROR!")

# output notation of completion
print("COMPLETE!")




