#!/usr/bin/env python
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 textwidth=79 autoindent

"""
Python source code
Last modified: 23 Jul 2011 - 19:41
Last author: Laban Mwangi

Scraper for Queensland Main Roads traffic cameras in selected regions.
Source http://www.tmr.qld.gov.au/Traffic-cameras-by-location.aspx


"""
import optparse
import urllib2
import urllib
import time
import re
from BeautifulSoup import BeautifulSoup

DEFAULT_URL = "http://www.tmr.qld.gov.au/Traffic-cameras-by-location.aspx"

#TODO: Probably make this class inheritable and overloadable
class Scrape(object):

    """
    Scrape parses the traffic cameras page located at tmr.qld.gov.au
    Keywords
        url - url to camera page
    """
    def __init__(self, url, CACHE_PATH):
        super(Scrape, self).__init__()
        self.url = url
        self.CACHE_PATH = CACHE_PATH
        self.top = re.match(r"(http://.*?)/.*", self.url).group(1)
        self.headers = []
        self.headers.append(
            ('User-Agent',
             'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110302 Iceweasel/3.5.17 (like Firefox/3.5.17)'))
        self.headers.append(('Host', 'www.tmr.qld.gov.au'))
        self.loc_cache = {}
        self.tree = None

    def init_tree(self):
        # Fetch the tree
        self.tree = BeautifulSoup(self.getHTML(self.url))

    def getHTML(self, url):
        """getHTML - return html from a page"""
        print "request to open '%s'" % url
        try:
            # On some occassions, we have unquoted urls.
            url = urllib.quote(url, '/=?:')
            req=urllib2.Request(url)
            for header in self.headers:
                req.add_header(header[0], header[1])
            usock = urllib2.urlopen(req)
            response = usock.read()
            usock.close()
            return response
        except urllib2.HTTPError, error:
            print error.msg, error.code, error.geturl()
            raise

    def _strip(self, s):
        """
        Removes html tags and stores cities in the instance
            s -  string with html
        """
        if not s:
            return ""
        s = re.sub('<.+?>|Image:.+?\r|\r', '', s)
        s = s.replace('&#39;', '\'') #replace html-encoded double-quotes
        s = s.replace('&amp;','&')
        s = s.replace('\t', ' ')
        s = re.sub('\s+', ' ', s)
        s.strip()
        return s

    def getCities(self):
        """
        getCities - Returns a list of cities/towns and sets
        an instance variable
        """
        # Moving this out of init speeds things up a little
        if not self.tree:
            self.init_tree()

        cities = self.tree.find('ul',
                                   attrs={"id": "region-list"}).findAll("li")
        self.cities = dict([self._strip(city.text), city] for city in cities)
        return [{"title": city, "description":"Cameras for %s" % city} for city
                in self.cities.keys()]

    def getLocations(self, city):
        """getLocations - returns known cams in a given city  """
        city_pivot = self.cities[city]
        hash_key = self._strip(city_pivot.text)

        loc_url = "%s/%s" % (self.top,
                             city_pivot.a.get("href"))

        self.loc_cache[hash_key] = BeautifulSoup(self.getHTML(loc_url))

        tree = self.loc_cache[hash_key]

        locations_tree = tree.find('ul',
                                 attrs={"id": "trafficCamList"}).findAll("li")

        locations = [[self._strip(location.text),
                      location.a.get("href")] for location in locations_tree]
        #Fix links
        for idx in range(len(locations)):
            if not locations[idx][1].startswith("http"):
                locations[idx][1] = "%s/%s" % (self.top, locations[idx][1])
        self.locations = dict(locations)
        return [{"title": location, "description":"Cameras for %s" % location}
                for location in self.locations.keys()]

    def getImageURL(self, location):
        """getImageURL - returns a dictionary of a location and it's image link
        given a location"""
        print "in getImageURL"
        # This site uses '(' which gui.py uses to split strings. so let's
        # search..
        for needle in self.locations:
            if needle.startswith(location):
                location = needle
                break
        imagepage_url = self.locations[location]
        imagepage_tree = BeautifulSoup(self.getHTML(imagepage_url))
        try:
            image = imagepage_tree.find("div",
                                   attrs={'id': 'cam-detail'}).find("img")
            image_url = image.get("src")
        except:
            print "Could not get image"
            return [{"title": "Traffic Camera",
                 "description": "Could not fetch image for %s" % location}]
        if not image_url.startswith("http"):
                image_url = "%s/%s" % (self.top, image_url)
        return image_url

    def getImages(self, location):
        """getImageURL - returns a dictionary of a location and it's image link
        given a location"""
        image_url = self.getImageURL(location)
        dest_filename = "%s/%s" % (self.CACHE_PATH, int(time.time()))
        try:
            dest_filename += "-%s" % image_url.split("/")[-1]
            # This service has &s in the image name
            dest_filename = dest_filename.split('&')[0]
            urllib.urlretrieve(image_url, dest_filename)
        except:
            print "Oops could not retrieve camera image"
            return [{"title": "Traffic Camera", "pic": "",
                     "description": " %s retrieval failed" % location}]

        return [{"title": "Traffic Camera", "pic": dest_filename,
                 "description": " %s Camera" % location}]


def main():
    """Main function. Called when this file is a shell script"""
    import tempfile
    import shutil
    tmp_dir = tempfile.mkdtemp()

    usage = "usage: %prog [options]"
    parser = optparse.OptionParser(usage)

    parser.add_option("-u", "--url", dest="url",
                      default=DEFAULT_URL,
                      type="string",
                      help="Link to cam page.")

    (options, args) = parser.parse_args()
    scraper = Scrape(options.url, tmp_dir)
    cities = scraper.getCities()
    for city in cities:
        print "Locations for %s" % city['title']
        locations = scraper.getLocations(city['title'])
        for location in locations:
            print "\t%s" % location['title']
            print "Image: > %s" % scraper.getImageURL(location['title'])
            print "Image download > %s" % scraper.getImages(location['title'])

    shutil.rmtree(tmp_dir)
# Standard script entry-point guard: only scrape when run directly.
if __name__ == '__main__':
    main()
