#! /usr/bin/env python
# coding=utf-8

# downPicasa.py
# This program can download all photo inside the Album of webPicasa which 
# is a web photos service support by Google
# Because Picasa save all images' URL inside JavaScript code
# So, just need to load any one page of all images page
# It means the url argument is any one photo, not the album index

# Hostname of the Picasa web-album service this script targets.
# NOTE(review): this module-level list is never read anywhere below —
# the __main__ block rebinds `url` before use.
url = ['picasaweb.google.com']

import sys, re, urllib2
# pdb is imported for interactive debugging only; unused in normal runs
import pdb
# BeautifulSoup is imported but never referenced below — presumably a
# leftover from an older parsing path; abort early if it is missing.
try:
    import BeautifulSoup
except ImportError:
    print "Can NOT find BeautifulSoup, Please Check the file"
    raise SystemExit

class MyParser:
    """Extracts photo URLs from the JavaScript embedded in a Picasa
    web-album HTML page.

    Picasa stores the album's image URLs inside <script> blocks, so the
    "parsing" is regex extraction rather than real HTML parsing.
    """

    # Patterns are compiled once at class-creation time instead of on
    # every feed() call (the original rebuilt them per call).
    # Pulls the body of every <script type="text/javascript"> element.
    re_script = re.compile('(?<=<script type="text/javascript">).+?(?=</script>)',
                           re.I | re.S | re.U)
    # Pulls the photo URL out of an album data record: the value of the
    # s:"..." field ('/' is escaped as '\x2F' inside the JS strings).
    re_photos = re.compile('(?<=s:")[A-Za-z0-9_\-%\\\.:]*(?=")')

    def __init__(self):
        """Reset all parser state."""
        # URL of the next album page, if any.  NOTE(review): feed()
        # never sets this, so pagination is effectively disabled.
        self.go_next = ""
        # photo URLs collected so far
        self.links = []
        # the target image link (not populated by the current feed())
        self.link = ""
        # 0 = search mode (collect links); any other value makes feed()
        # a no-op, matching the original behavior
        self.download = 0

    def feed(self, data):
        """Scan *data* (an HTML page) and append every photo URL found
        in its JavaScript to ``self.links``.

        Only runs in search mode (``self.download == 0``).
        """
        if self.download != 0:
            return
        print("search")
        for script in self.re_script.findall(data):
            for photo in self.re_photos.findall(script):
                # un-escape the JS-encoded slashes
                self.links.append(photo.replace('\\x2F', '/'))


class main:
    """main class"""
    
    def __init__(self, url):
        """class initialize"""
        # create the Parser
        self.parser = MyParser()
        # last processing url
        self.page_url = ""
        self.link_list = []
        self.img_list = []
        # the url of next page
        self.next_page = ""
        # total images that downloaded
        self.total = 0
        print "Program Start"
        self.getPage(url)
        print "Search all pictures' links Finished!!"
        self.getPic(self.link_list)
        print "Total: %d" % self.total
    
    def testExist(self, file_name):
        """this function can test a file is already existed or not
            and it can return a new file name which can use to save the image"""
        try:
            file = open(file_name)
        except IOError:
            # if the file does not exist, the file name can be used
            return file_name
        if len(file.read()) == 0:
            # if the file does not have any content, just cover it
            file.close()
            return file_name
        else:
            # it the file is exist, try any other file name
            file.close()
            a = 1    # using for exit the loop
            i = 0    # append to the new file name
            # split the file name by '.', then the last past is the extension
            file_names = file_name.split('.')
            ext = file_names[-1]
            # let n as the length of extension, l as the length of whole file name
            # l - n -1 is the index of the character . inside the file name
            n = len(ext)
            l = len(file_name)
            ln = l - n
            name = file_name[:ln]
            while a:
                new_file_name = "%s%03d.%s" % (name, i, ext)
                try:
                    file = open(new_file_name)
                except IOError:
                    # if the new file name does not exist, it can be used
                    a = 0
                else:
                    if len(file.read()) == 0:
                        # if the new file name has already existed, but it's NULL, also can be used
                        a = 0
                    else:
                        # otherwise, try another name
                        i+=1
                        file.close()
            file.close()
            return new_file_name

    def getPic(self, link_list):
        """get the Pictures from Web Page"""
        for i in link_list:
            for j in i:
                # because the image's url is saving inside the JavaScript code
                # So, just get the url and download it
                self.downLink(j)
                # Below is junk code, no used
                #data = self.goLink(j)
                #if (len(data)):
                    # go to the final step, parse the data find the image's url
                    #self.parser.__init__()
                    #self.parser.download = 1
                    #self.parser.feed(data)
                    # get the image url
                    #self.img_list.append(self.parser.link)
                    # download the image
                    #self.downLink(self.parser.link)

    def getPage(self, url):
        """get the Web HTML Page"""
        # get the data
        data = self.goLink(url)
        if ( data ):
            # creat the HTML Parser
            self.parser.__init__()
            # get the information
            self.parser.feed(data)
            # get the search result
            self.link_list.append(self.parser.links)
            self.total += len(self.parser.links)
            while (self.parser.go_next):
                print "Next: %s" % self.parser.go_next
                data = self.goLink(self.parser.go_next)
                if data:
                    self.parser = MyParser()
                    self.parser.feed(data)
                    #print parser.result()
                    self.link_list.append(self.parser.links)

    def downLink(self, input_url):
        """down a file from url"""
        # check the url
        if input_url.startswith('http'):
            # this is an absolute url
            url = input_url
        elif input_url.startswith('/'):
            # it is a relative url, but relate to the root
            a = self.page_url.split('/')
            url = a[0] + '//' + a[2] + input_url
        else:
            # it is a relative url also, but relate to "this page"
            a = self.page_url.rfind('/') + 1
            url = self.page_url[0:a] + input_url
        print "Download this: ", url,
        request = urllib2.Request(url, None,
                {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; zh-TW; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11)',
                 'Referer': self.page_url})
        img = urllib2.urlopen(request)
        file_name = img.geturl().split('/')[-1]
        file_name = self.testExist(file_name)
        file = open(file_name, "wb")
        print >> file, img.read()
        file.close()
        img.close()
        print "    Saved as %s" % file_name

    def goLink(self, input_url):
        """open a url, and return the data"""
        # check the url
        if ( input_url.startswith('http') ):
            url = input_url
        elif ( input_url.startswith('/') ):
            a = self.page_url.split('/')
            url = a[0] + '//' + a[2] + input_url
        else:
            a = self.page_url.rfind('/') + 1
            url = self.page_url[0:a] + input_url
        print "Open URL: %s" % url
        print "Loading........."
        # creat a request
        request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; zh-TW; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11)'})
        # open the url
        html_p = urllib2.urlopen(request)
        # get the html page data from http server
        data = html_p.read()
        html_p.close()
        # return the data
        print "download page successful"
        self.page_url = html_p.geturl()
        return data


if __name__ == '__main__':
    # Take the album URL from the command line when one was supplied;
    # otherwise prompt the user for it interactively.
    if len(sys.argv) == 2:
        target = sys.argv[1]
    else:
        target = raw_input(":")
    main(target)