'''
Created on 19 Dec 2011

@author: Dave
'''


import re
import os
import sys
import mechanize
import cookielib
import urllib
#import pdb

def main():
    """ processes command arguments before downloading album"""
    print "number of args ",len(sys.argv)
    if len(sys.argv)<5 :
        print "usage error: photobucketdump.py albumurl useremail password directory"
        return 0
 
    album_url = sys.argv[1]
    my_email = sys.argv[2]
    my_password = sys.argv[3]
    outp_path= sys.argv[4]
    
    return download_album(album_url,my_email, my_password,outp_path)
    
def download_album(album_url,my_email,my_password,outp_path):
    """ recursively extracts photo urls and titles from a photbucket web page"""
 
    photobucket_url = "http://photobucket.com"
            
    print "inp_fname ", album_url
    print "outp_fname ",outp_path
    print "user or email", my_email
    print "password", my_password 
      
    br = initialise_browser()
    
    #pdb.set_trace()

    if False:
        response1 = br.open(photobucket_url)
        my_data = response1.get_data()
        print "my data begin"
        print my_data
        print "my data end"

   
    # The site we will navigate into, handling it's session
    br.open(photobucket_url)
        
    if False:
        i=0
        print "forms begin"
        for f in br.forms():
            print "form", i , " ", f       
            i=i+1
        print "forms end"
        
    # Select the first (index zero) form
    br.select_form(nr=1)
    #give password
    br.form["loginForm[usernameemail]"]=my_email
    br.form["loginForm[password]"]=my_password
        
    if False:
        #check that the form is set correctly
        print "selected form"
        print br.form
        print "end selected form"
        
    #send login response
    br.submit()
    #get response from login
    submit_response = br.response()
    submit_data = submit_response.get_data()

    #now cookies are set after the login 
    #set the album page to show all the photos
    
    return download_tree(br, outp_path, album_url)
    
def download_tree(br, outp_path, album_url):
    """Downloads a complete """
    if  not os.path.isdir(outp_path) :
        print "directory does not exist - creating it"
        os.mkdir(outp_path)
    else:
        print "directory exists"
        
        
    br.open(album_url+'?start=all')
    album_response = br.response()
    album_data = album_response.get_data()
       
    if False:
        #display the album page
        print "album begin"
        print album_data
        print "album end" 
      
    #extract the photo urls from the thumbnails
    photo_urls = extract_photo_urls(album_data)
        
    if False:
        i=0
        print "photo_urls begin"
        for p in photo_urls:
            print i, p
            print "filename", extract_filename(p)
            i=i+1
        print "photo urls end"
    
    subfolder_urls = extract_subfolder_urls(album_data)
     
    if True:
        i=0
        print "subfolder_urls begin"
        for sub in subfolder_urls:
            print i, sub
            print "subfolderame", extract_subfolder_name(sub)
            i=i+1
        print "subfolder urls end"
    
    #download and save the photo urls
    save_photo_urls(photo_urls, outp_path)
        
    print "Finish photos downloaded"
     
    for url in subfolder_urls:
        subfolder_name = extract_subfolder_name(url)
        new_path = outp_path+'/'+subfolder_name
        download_tree(br, new_path, url)
   
    return 1


def extract_filename(url):
    """Extracts the photo file name (e.g. 'pic.jpg') from the end of a url.

    Accepts both http and https urls. Returns "" when the url does not
    contain a '/'-delimited name ending in .jpg/.JPG.
    """
    mat_obj = re.match(r"https?:.*?/([^/]+?\.(?:JPG|jpg))", url)
    if mat_obj is None:
        return ""
    return mat_obj.group(1)
        
def extract_subfolder_name(url):
    """Extracts the trailing directory name from an album url ending in '/'.

    e.g. 'http://host/albums/user/holiday/' -> 'holiday'.
    Accepts both http and https urls. Returns "" when the url does not end
    with a '/'-terminated name.
    """
    mat_obj = re.match(r"https?:.*?/([^/]+?)/$", url)
    if mat_obj is None:
        return ""
    return mat_obj.group(1)

def extract_photo_urls(a_string):
    """Extracts full-size photo urls from photobucket album page html.

    The page embeds thumbnail urls as pbthumburl="...th_<name>.jpg"; the
    full-size photo url is the same path with the 'th_' prefix removed.
    Returns a list of photo urls.
    """
    thumbs = re.findall(r"pbthumburl=\"(http:.+?/)th_([^/]+?\.(?:JPG|jpg))", a_string)
    # rejoin path and bare file name to form the full-size url
    return [base + name for base, name in thumbs]

def extract_subfolder_urls(a_string):
    """Pulls every sub-album link out of the album page html."""
    # each sub-album is rendered as a 'subalbumthumb' div wrapping an <a href>
    pattern = "<div class=\"subalbumthumb bdrClr \">[^<>]+?<a href=\"(http:.+?)\""
    return re.findall(pattern, a_string)


def save_photo_urls(urls, directory):
    """down loads  a list of files from their urls to a given directory"""
    for url in urls:
        filename = extract_filename(url)
        save_as= os.path.join(directory, filename)
        print "downloading ", url
        urllib.urlretrieve(url, save_as)
    
def initialise_browser():
    """Creates a mechanize browser configured to behave like Firefox.

    Returns the configured mechanize.Browser instance.
    """
    browser = mechanize.Browser()

    # attach a cookie jar so the login session persists between requests
    browser.set_cookiejar(cookielib.LWPCookieJar())

    # behave like an ordinary interactive browser
    browser.set_handle_equiv(True)
    browser.set_handle_gzip(True)
    browser.set_handle_redirect(True)
    browser.set_handle_referer(True)
    browser.set_handle_robots(False)

    # Follows refresh 0 but not hangs on refresh > 0
    browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    # pretend to be firefox
    browser.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
    return browser


# run only when executed as a script, not when imported as a module
if __name__ == "__main__":
    main()
