# Copyright (c) 2011, Felix Laurie von Massenbach <fantasizer@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#        this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#        this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
# 3. Neither the names of the copyright holders nor the names of its
#        contributors may be used to endorse or promote products derived from
#        this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

# TODO: deal with files and images

# test: http://www2.warwick.ac.uk/fac/sci/psych/students/undergraduate/
# https://websignon.warwick.ac.uk/origin/slogin?shire=https%3A%2F%2Fwww2.warwick.ac.uk%2Fsitebuilder2%2Fshire-read&providerId=urn%3Awww2.warwick.ac.uk%3Asitebuilder2%3Aread%3Aservice&target=http%3A%2F%2Fwww2.warwick.ac.uk%2F

import getpass
import mechanize
import os
import pickle
import re
import time
import urllib
import urlparse


# File extensions treated as already-complete HTML page names; anything
# else is saved as <path>/index.html.
EXTS = [".html", ".htm"]
# Raw strings so the \s escape reaches the regex engine instead of being
# interpreted (invalidly) by the Python string parser.
# Each pattern captures the href value of a <link>/<a> tag.
LINK_RE = re.compile(r"<link.*\shref=[\'\"](.*?)[\'\"].*?>")
A_RE = re.compile(r"<a.*\shref=[\'\"](.*?)[\'\"].*?>")


def crawlPages(homeDir, starturl, delay, requestLoginFlag=False, loginurl=None,
               userPass=(None, None)):
    """Crawl starturl and the pages it links to, mirroring them under homeDir.

    The pending-link list is checkpointed to <homeDir>/current_links after
    every page so that an interrupted crawl can be resumed; the checkpoint
    is removed once the crawl completes.

    homeDir -- local directory the mirror is written into
    starturl -- URL the crawl starts from (its domain bounds the crawl)
    delay -- minimum pause in seconds between web hits
    requestLoginFlag -- when True, log in at loginurl before crawling
    loginurl -- login page URL (used only when requestLoginFlag is True)
    userPass -- (username, password) tuple for the login form
    """
    browser = mechanize.Browser()
    if requestLoginFlag:
        requestLogin(browser, loginurl, userPass[0], userPass[1])
    urlOpener = browser  # mechanize.Browser keeps login cookies across hits
    linksFilename = os.path.join(homeDir, "current_links")
    try:
        # Binary mode: pickle data is not line-oriented text.
        with open(linksFilename, "rb") as checkpoint:
            linksList = pickle.load(checkpoint)
    except (IOError, EOFError, pickle.UnpicklingError):
        # No checkpoint, or an unreadable/truncated one: start afresh.
        linksList = None
    if not linksList:
        # Also guards against a checkpoint holding an empty list, which
        # would otherwise crash on linksList[0] below.
        linksList = [starturl]
    currenturl = linksList[0]
    homeDomain = urlparse.urlparse(starturl).netloc
    while linksList:
        savePageAndAddLinks(homeDomain, homeDir, urlOpener,
                            currenturl, linksList)
        time.sleep(delay)
        with open(linksFilename, "wb") as checkpoint:
            pickle.dump(linksList, checkpoint)
        # The list is used as a stack: newly discovered links are crawled
        # first; the start URL sits at the bottom and is popped last (its
        # re-visit is a no-op because the saved file already exists).
        currenturl = linksList.pop()
    os.remove(linksFilename)


def savePageAndAddLinks(homeDomain, homeDir, urlOpener, url, linksList):
    """Fetch url, save it under homeDir, queue its links, rewrite them.

    Pages outside homeDomain, pages already saved, and pages that fail to
    download are skipped. Links found in the page are appended to
    linksList (which is mutated in place and also returned), and the saved
    copy has its links rewritten to file: URLs so the mirror is browsable
    offline.

    homeDomain -- netloc the crawl is restricted to
    homeDir -- root directory of the local mirror
    urlOpener -- object with an open(url) method returning a file-like body
    url -- page to fetch
    linksList -- pending-link stack shared with the caller
    """
    urlParts = urlparse.urlparse(url)

    # Stay inside the home domain (relative URLs have an empty netloc).
    if urlParts.netloc and urlParts.netloc != homeDomain:
        return linksList

    path = homeDir + urlParts.path
    dirname, filename = os.path.split(path)

    # Directory-style URLs (no filename, or no HTML extension) are saved
    # as <path>/index.html.
    if not filename or os.path.splitext(filename)[1] not in EXTS:
        path = os.path.join(path, "index.html")
        dirname, filename = os.path.split(path)

    # Already mirrored on a previous run -- nothing to do.
    if os.path.exists(path):
        return linksList

    try:
        page = urlOpener.open(url).read()
    except IOError:
        # Unreachable page: skip it rather than abort the whole crawl.
        return linksList

    if not os.path.isdir(dirname):
        os.makedirs(dirname)

    links = LINK_RE.findall(page) + A_RE.findall(page)

    for link in links:
        linkParts = urlparse.urlparse(link)

        # javascript: pseudo-links can never be fetched -- drop them
        # before queueing (previously they were queued and each later
        # cost a failed hit plus the inter-hit delay).
        if "javascript:" in link:
            continue

        if linkParts.netloc:
            tempLink = link
        else:
            # Resolve relative links against the page they appear on.
            tempLink = urlparse.urljoin(url, link)

        if tempLink in linksList:
            continue

        linksList.append(tempLink)

        # Fragment links are queued (the target page still gets crawled)
        # but not rewritten.
        if "#" in link:
            continue

        linkPath = linkParts.path.split("?")[0]
        linkFilename = os.path.split(linkPath)[1]

        if not linkPath:
            linkPath = linkParts.netloc

        if linkFilename and os.path.splitext(linkFilename)[1] in EXTS:
            # NOTE(review): this first replace rewrites every occurrence
            # of linkPath, so the replace of `link` below may match the
            # already-expanded text and double-append index.html --
            # preserved as-is, verify against real pages before changing.
            page = page.replace(linkPath, os.path.join(linkPath, "index.html"))
            page = page.replace(
                link,
                urlparse.urlunparse(("file", homeDir,
                                     os.path.join(linkPath, "index.html"),
                                     linkParts.params, linkParts.query,
                                     linkParts.fragment)))
        else:
            page = page.replace(
                link,
                urlparse.urlunparse(("file", homeDir, linkPath,
                                     linkParts.params, linkParts.query,
                                     linkParts.fragment)))

    with open(path, "w") as output:
        output.write(page)

    return linksList


def requestLogin(browser, loginurl, username, password):
    """Log the mechanize browser in via the form named 'loginform'.

    browser -- mechanize.Browser instance to authenticate
    loginurl -- URL of the page containing the login form
    username, password -- credentials filled into the form
    """
    # robots.txt enforcement is suspended only for the login round-trip
    # and restored immediately afterwards.
    browser.set_handle_robots(False)
    browser.open(loginurl)
    browser.select_form(name="loginform")
    for field, value in (("userName", username), ("password", password)):
        browser[field] = value
    browser.submit()
    browser.set_handle_robots(True)


if __name__ == "__main__":
    # Interactive entry point: gather crawl parameters from the user and
    # start the crawl. (The previously constructed urllib.FancyURLopener
    # was never used -- crawlPages builds its own mechanize browser.)
    homeDir = os.path.realpath(str(raw_input("Please enter the root "
                                             "directory you wish to "
                                             "save the crawl to:\t")).strip())
    url = str(raw_input("Please enter the URL of "
                        "the starting point:\t")).strip()
    delay = float(raw_input("Please enter the minimum delay allowed "
                            "between web hits in seconds:\t").strip())
    if str(raw_input("Do you need to login? (y/n)\t")).strip() == "y":
        loginurl = str(raw_input("Please enter the URL "
                                 "to login to:\t")).strip()
        username = str(raw_input("Please enter your username:\t")).strip()
        # getpass keeps the password off the terminal echo.
        password = getpass.getpass("Please enter your password:\t")
        crawlPages(homeDir, url, delay, True, loginurl, (username, password))
    else:
        crawlPages(homeDir, url, delay)
    