from urllib.request import urlopen
from urllib.request import Request
from urllib.error import URLError
from bs4 import BeautifulSoup
import re
from yandre_crawlerPro import yandere_logcheck

# specify the index url of targeted website
indexUrl = "http://www.umei.cc/p/gaoqing/index-"
# candidate BeautifulSoup parser backends; interpreter[3] (html5lib) is the one used below
interpreter = ["html.parser","lxml","xml","html5lib"]

# spoof a desktop Firefox user agent so the site serves regular HTML pages
user_agent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0"
headers = {"User-Agent": user_agent}

def getTotalPage(bsObj):
    """Return the total page count text from the pagination bar.

    The second-to-last <a> inside div.pagination carries the page count
    (the last one is the "next" link). Raises AttributeError when the
    pagination div is missing — callers rely on catching that.
    """
    links = bsObj.find("div", {"class": "pagination"}).findAll("a")
    count = len(links)
    return links[count - 2].get_text()

def writeLog(url):
    """Append *url* on its own line to log.txt in the working directory.

    Fix: the original opened the file without a context manager, leaking
    the handle if the write raised; `with` guarantees it is closed.
    """
    with open("log.txt", "a") as logf:
        logf.write("%s\n" % url)

def listCrawler(bsObj):
    """Extract one row per pool entry from the highlightable table.

    Returns a list of dicts with keys "url", "title", "page", "updateDate",
    logging each url via writeLog as a side effect.

    BUG FIX: the original created a single dict before the loop and appended
    that same object on every iteration, so every element of the returned
    list aliased one dict holding only the LAST row's values. A fresh dict
    is now built per row.
    """
    results = []
    rows = bsObj.find("table", {"class": "highlightable"}).find("tbody").findAll("tr")
    for row in rows:
        cells = row.findAll("td")  # hoisted: original re-ran findAll for each field
        link = cells[0].find("a")
        info = {
            "url": link.attrs["href"],
            "title": link.get_text(),
            "page": cells[2].get_text(),
            "updateDate": cells[4].get_text(),
        }
        results.append(info)
        writeLog(info["url"])
    return results

# process the first page of photo album including get total page number and crawling
def getSinglePageList():
    try:
        url = "https://yande.re/pool/"
        webpage = Request(url, None, headers)
        html = urlopen(webpage)  # combine into a entire URL
    except URLError as e:
        print("url open ERROR!,reason is %s" %e.reason)

    myBsObj = BeautifulSoup(html, interpreter[3]) # create the new BS object be used following

    # get the total pages numbers of the photo album from bottom pages bar

    try:
        getTotalPage(myBsObj)
        list = listCrawler(myBsObj)
        print(type(list))
        print(list[0]["url"])

    except AttributeError:
        print("CAN NOT GET TOTAL PAGES NUMBER!")


"""
# crawl the rest images in rest pages in similar method
index = 1 # set the count to record the number of album
for pageNumber in range(1, totalPageNum):   # crawling content from page 2

    try:
        html = urlopen(indexUrl + str(pageNumber) + ".htm")
    except URLError as e:
        print("(%d)url open ERROR!,reason is %s" % (index, e.reason))
        index += 1
        continue
    print(indexUrl + str(pageNumber) + ".htm")

    bsObj = BeautifulSoup(html, interpreter[3])

    try:
        albumBoxes = bsObj.find("div", {"id": "content"}).find("div", {"class": "catalog"}). \
            find("div", {"id": "msy"}).findAll("div", {"class": "t"})[1:]
        for singleAlbumBox in albumBoxes:
            albumTitle = singleAlbumBox.find("img").attrs["title"]  # get the corresponding image url\
            albumUrl = singleAlbumBox.find("a").attrs["href"]
            # exclude the url to XIUREN VIP through matching the url with a regexp, since it contains only a few photos
            rePattern = re.compile('\/p\/gaoqing\/xiuren_VIP')
            reResult = rePattern.match(albumUrl)
            if reResult is not None:
                print("<%d> %s" %(index, "<< THIS IS XIUREN VIP ALBUM! >>"))
                index += 1
                continue
            else:
                print("<%d><TITLE> %s, <URL> %s" %(index, albumTitle, albumUrl))
            index += 1
    except AttributeError:
        print("ERROR!")  # the error signal
    continue

# output notation of completion
print("COMPLETE!")
"""

def main():
    """Placeholder entry point; currently does nothing.

    NOTE(review): the __main__ guard calls getSinglePageList() directly
    instead of this stub — confirm whether main() was meant to wrap it.
    """
    pass

if __name__ == '__main__':
    # runs the crawler directly; main() above is an empty stub and is not called
    getSinglePageList()


