#!/usr/bin/env python
# coding=utf-8

import urllib2, HTMLParser
from BeautifulSoup import BeautifulSoup
import os, sys

class MyHTMLParser():
    """Parse one album HTML page with BeautifulSoup.

    Two modes, selected by ``self.download``:
      0 - index mode: collect the per-photo page links and the
          next-index-page link into ``self.links`` / ``self.go_next``
      1 - download mode: extract the direct image link into ``self.link``
    """
    def __init__(self):
        """Initialise empty parse results."""
        # URL of the next index page, or "" when there is none
        self.go_next = ""
        # links to the individual photo pages found on an index page
        self.links = []
        # direct link to the target image (download mode)
        self.link = ""
        # 0 = index mode, 1 = download mode
        self.download = 0

    def feed(self, data):
        """Parse *data* (raw HTML) and fill go_next/links or link."""
        # the site's markup is broken, so repair it before parsing
        data2 = self.cleanData(data)
        soup = BeautifulSoup(data2)
        #self.logSoup(soup.prettify())
        if not self.download:
            # index mode: the next-page anchor lives inside a span whose
            # class is "pages" and whose text is u"下一页" ("next page").
            # The page may contain that span twice (author's error); the
            # copies are identical, so only the first one is used.
            try:
                pages_span = soup('span', attrs={"class": "pages"})[0]
            except IndexError:
                sys.stdout.write("target not found\n")
                raise SystemExit
            for anchor in pages_span('a'):
                # anchors without text (e.g. images) have .string == None
                if anchor.string and anchor.string == u"下一页":
                    self.go_next = anchor['href']

            # photo-page links sit in td cells of class
            # "AlbumPic img-150-100"; the photos' intended order is kept
            # in the thumbnails' alt attribute but is broken beyond
            # repair, so the links are taken in document order
            cells = soup('td', attrs={"class": "AlbumPic img-150-100"})
            for cell in cells:
                if cell.a:
                    self.links.append(cell.a['href'])

        elif self.download:
            # download mode: the image anchor is inside the div whose
            # class is "Photo"
            try:
                photo_div = soup('div', attrs={"class": "Photo"})[0]
            except IndexError:
                sys.stdout.write("target not found\n")
                raise SystemExit
            if photo_div.a:
                self.link = photo_div.a['href']

    def cleanData(self, data):
        """Repair the site's broken HTML so BeautifulSoup can parse it.

        - replace the meta charset line with one well-formed gbk meta tag
        - rebuild anchors whose attributes before href= are mangled
        Returns the cleaned HTML with one trailing newline per line.
        """
        encode_line = '<meta http-equiv="Content-Type" content="text/html; charset=gbk">'
        out = []
        for line in data.splitlines():
            if "meta" in line and "charset" in line:
                # BUGFIX: this branch used to fall through and append the
                # original line as well, leaving two conflicting charset
                # declarations in the cleaned document
                out.append(encode_line)
            elif "title=" in line:
                # keep only the part starting at href=, rebuilt as an <A>
                n = line.find('href=')
                out.append('<A ' + line[n:])
            else:
                out.append(line)
        return '\n'.join(out) + '\n' if out else ""

    def logSoup(self, soup):
        """Append a pretty-printed soup dump to the "log" file."""
        sep = "\n\n=========================================================================================\n\n"
        log_file = open("log", "a")
        try:
            log_file.write(sep + "\n")
            log_file.write("%s\n" % (soup,))
            log_file.write(sep + "\n")
        finally:
            # always release the handle, even if a write fails
            log_file.close()


class main:
    """Crawl an album: collect all photo-page links, then download each image.

    All work happens in __init__; construct with the album's index URL.
    """
    def __init__(self, url):
        # URL of the most recently fetched page; base for relative links
        self.page_url = ""
        # one list of photo-page links per index page
        self.link_list = []
        # direct image URLs found so far
        self.img_list = []
        # ordinal of the photo currently being downloaded
        self.downloading = 0
        # total number of photo pages found
        self.total = 0
        sys.stdout.write("Program Start\n")
        self.getPage(url)
        sys.stdout.write("Search all pictures' links Finished!!\n")
        self.getPic(self.link_list)
        # jot down where the images came from
        self.comment(url)

    def _resolveUrl(self, input_url):
        """Resolve *input_url* against self.page_url; return an absolute URL.

        Shared by downLink and goLink (the logic used to be duplicated).
        """
        if input_url.startswith('http'):
            # already absolute
            return input_url
        if input_url.startswith('/'):
            # root-relative: keep scheme and host of the current page
            parts = self.page_url.split('/')
            return parts[0] + '//' + parts[2] + input_url
        # page-relative: replace everything after the last '/'
        cut = self.page_url.rfind('/') + 1
        return self.page_url[:cut] + input_url

    def getPic(self, link_list):
        """Visit every photo page in *link_list* and download its image."""
        for page_links in link_list:
            for page_url in page_links:
                data = self.goLink(page_url)
                if data:
                    # final step: parse the photo page for the image URL
                    parser = MyHTMLParser()
                    parser.download = 1
                    parser.feed(data)
                    self.img_list.append(parser.link)
                    self.downloading += 1
                    sys.stdout.write("downloading No.%s of total %s photos\n"
                                     % (self.downloading, self.total))
                    self.downLink(parser.link)

    def getPage(self, url):
        """Walk the album index pages and collect all photo-page links."""
        data = self.goLink(url)
        if data:
            parser = MyHTMLParser()
            parser.feed(data)
            self.link_list.append(parser.links)
            self.total += len(parser.links)
            # follow "next page" links until a page has none
            while parser.go_next:
                sys.stdout.write("Next: %s\n" % parser.go_next)
                data = self.goLink(parser.go_next)
                if data:
                    parser = MyHTMLParser()
                    parser.feed(data)
                    self.link_list.append(parser.links)
                    self.total += len(parser.links)
                else:
                    # BUGFIX: without this the loop re-fetched the same
                    # URL forever whenever a page came back empty
                    break
        sys.stdout.write("found total %s photos\n" % self.total)

    def downLink(self, input_url):
        """Download the file at *input_url* into the current directory."""
        url = self._resolveUrl(input_url)
        sys.stdout.write("Download this: %s\n" % url)
        request = urllib2.Request(url, None,
                {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; zh-TW; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11)',
                 'Referer': self.page_url})
        img = urllib2.urlopen(request)
        try:
            # name the file after the last path component of the final URL
            file_name = img.geturl().split('/')[-1]
            out = open(file_name, "wb")
            try:
                # BUGFIX: write the raw bytes; the old "print >> file"
                # appended a newline and corrupted the image file
                out.write(img.read())
            finally:
                out.close()
        finally:
            img.close()

    def goLink(self, input_url):
        """Fetch *input_url* (resolved against the last page), return the HTML."""
        url = self._resolveUrl(input_url)
        sys.stdout.write("Open URL: %s\n" % url)
        sys.stdout.write("Loading.........\n")
        request = urllib2.Request(url, None,
                {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; zh-TW; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11)'})
        html_p = urllib2.urlopen(request)
        try:
            data = html_p.read()
            # remember the final URL (after redirects) as the new base
            self.page_url = html_p.geturl()
        finally:
            html_p.close()
        sys.stdout.write("download page successful\n")
        return data

    def comment(self, url):
        """Record the source URL in a "via" file next to the images."""
        f = open("via", "w")
        try:
            f.write(url + "\n")
        finally:
            f.close()


if __name__ == "__main__":
    # take the album URL from the command line when given,
    # otherwise prompt for it interactively
    if len(sys.argv) == 2:
        target = sys.argv[1]
    else:
        target = raw_input(":")
    main(target)
