from urllib.request import urlopen
from urllib.request import Request
from urllib.error import URLError
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import re
import crawler

def isInLog(albumUrl):
    """Return True if indexUrl+albumUrl is already recorded in Umei_log.txt.

    Reads the module-level ``indexUrl``. Original bug: the loop returned
    False after comparing only the FIRST line of the log, so every album
    after the first logged one looked "new"; it also leaked the file handle
    on the False path. Now every line is checked and ``with`` guarantees
    the handle is closed.
    """
    target = indexUrl + albumUrl
    with open("Umei_log.txt", "r") as logFile:
        for row in logFile:
            if target == row.strip():
                return True
    return False

def writeLog(albumUrl):
    """Append indexUrl+albumUrl as one line to Umei_log.txt.

    Reads the module-level ``indexUrl``. Uses ``with`` so the handle is
    closed even if the write raises (the original manual close would leak
    it in that case).
    """
    with open("Umei_log.txt", "a") as logFile:
        logFile.write("%s%s\n" % (indexUrl, albumUrl))

# specify the index url of targeted website
indexUrl = "http://www.umei.cc"
# candidate BeautifulSoup parsers; index 3 ("html5lib") is the one used below
interpreter = ["html.parser","lxml","xml","html5lib"]

# spoof a desktop-browser User-Agent so the site serves normal pages
user_agent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0"
headers = {"User-Agent": user_agent}

# process the first page of photo album including get total page number and crawling
try:
    url = indexUrl + "/p/gaoqing/index-" + "1" + ".htm"
    webpage = Request(url, None, headers)
    html = urlopen(webpage)  # fetch the first index page
except URLError as e:
    print("url open ERROR!,reason is %s" % e.reason)
    # the rest of the script needs this page; originally execution fell
    # through here and crashed at the parse step with `html` undefined
    raise SystemExit(1)

bsObj = BeautifulSoup(html, interpreter[3])  # parse with html5lib

# get the total pages numbers of the photo album from bottom pages bar
try:
    pageLinks = bsObj.find("div", {"id": "pagination"}).find("div", {"class": "pages"}).findAll("a")
    totalPageNum = len(pageLinks) - 2  # "-2" since the bottom pages bar has "total","previous","next"
    print("TOTAL PAGES OF THIS ALBUM: " + str(totalPageNum))
except AttributeError:
    print("CAN NOT GET TOTAL PAGES NUMBER!")
    # the crawl loop below needs totalPageNum; originally execution fell
    # through here and crashed with a NameError
    raise SystemExit(1)

# crawl every index page; the original `range(1, totalPageNum)` silently
# skipped the last page
index = 1  # running count of processed albums
for pageNumber in range(1, totalPageNum + 1):

    try:
        html = urlopen(indexUrl + "/p/gaoqing/index-" + str(pageNumber) + ".htm")
    except HTTPError as e2:
        # HTTPError subclasses URLError, so it must be caught FIRST;
        # the original order made this branch unreachable
        print("(%d)url open ERROR!,error code is %d, reason is %s" % (index, e2.code, e2.reason))
        index += 1
        continue
    except URLError as e1:
        print("(%d)url open ERROR!,reason is %s" % (index, e1.reason))
        index += 1
        continue
    # echo the URL actually fetched (the original print dropped the
    # "/p/gaoqing/index-" path segment)
    print(indexUrl + "/p/gaoqing/index-" + str(pageNumber) + ".htm")

    bsObj = BeautifulSoup(html, interpreter[3])

    try:
        albumBoxes = bsObj.find("div", {"id": "content"}).find("div", {"class": "catalog"}). \
            find("div", {"id": "msy"}).findAll("div", {"class": "t"})[1:]
        # exclude XIUREN VIP albums since they contain only a few photos;
        # compile once per page instead of once per album, raw string to
        # avoid the deprecated '\/' escapes
        vipPattern = re.compile(r'/p/gaoqing/xiuren_VIP')
        for singleAlbumBox in albumBoxes:
            albumTitle = singleAlbumBox.find("img").attrs["title"]
            albumUrl = singleAlbumBox.find("a").attrs["href"]

            if vipPattern.match(albumUrl) is not None:
                print("<%d> %s" % (index, "<< THIS IS XIUREN VIP ALBUM! >>"))
                index += 1
                continue
            print("<%d><TITLE> %s, <URL> %s" % (index, albumTitle, albumUrl))
            if isInLog(albumUrl):
                # NOTE(review): `break` abandons the REST of this page only;
                # the outer loop still fetches later pages — confirm intent
                print("repeat in log file")
                break
            writeLog(albumUrl)
            crawler.main(indexUrl + albumUrl)
            index += 1
    except AttributeError:
        print("ERROR!")  # expected page structure not found on this page

# output notation of completion
print("COMPLETE!")