#!/usr/bin/python
#
# -*- coding: utf-8 -*-

# Copyright 2011 Google Inc. All Rights Reserved.

from BeautifulSoup import BeautifulSoup as BS
from BeautifulSoup import Tag

import datetime
import re
import urllib

# Timestamp captured once at import time (UTC) and stamped onto every output
# row, so all rows from one scrape share the same time.
now = datetime.datetime.utcnow()

# The very, very, VERY!!, loose formatting is
#
# <h3>region</h3>
# <strong>street</strong> - description
# ...

# <strong> sections we ignore because they aren't road-closure entries.
# NOTE: the trailing comma is required — without it the parentheses collapse
# to a plain string, and "x in blacklist" silently becomes a SUBSTRING test
# instead of exact tuple membership.
blacklist = (
  'WARNINGS: Roads not closed, but drivers should approach with caution',
)

# Some <strong> sections embed both road and description, separated by ' - ',
# so we split on that marker. These entries are road names that legitimately
# contain ' - ' and must NOT be split. (Trailing comma: see note above on
# blacklist — same tuple-vs-string pitfall.)
split_whitelist = (
  'Ballarat - Burrumbeet Road',
)

# Format the street which in theory is the content between <strong> tags
def extract_street(str):
  """Parse the content of a <strong> tag into (road, description).

  Returns None for irrelevant fragments (None input, blacklisted text, or
  over-long advisory blurbs); otherwise a 2-tuple (road, desc) where desc
  is None unless the fragment embedded its own description after ' - '.
  """
  if str is None:
    return None

  str = fix(str)
  if str in blacklist:
    return None

  # hack - some strong content encloses a trailing hyphen
  if str.endswith(' -'):
    str = str[:-2]

  # another hack - there is a massive long "advisory" in the middle of the
  # page, so just ignore anything over 75 chars
  if len(str) > 75:
    return None

  # some closures include the description as part of the strong text
  # (which is why we return a tuple). maxsplit=1 guarantees exactly two
  # parts even if ' - ' occurs more than once — callers unpack a pair, and
  # an unbounded split would make that unpacking raise ValueError.
  if ' - ' in str and str not in split_whitelist:
    (road, desc) = str.split(' - ', 1)
    return (road, desc)

  return (str, None)

# Format the description which is in theory the text following the strong block
def extract_desc(str):
  """Clean up the free text that follows a <strong> road name.

  Returns the tidied description, or None when no text was supplied.
  """
  if str is None:
    return None

  desc = fix(str)

  # Drop the leading hyphen separator left over from the "road - desc" layout.
  if desc.startswith('-'):
    desc = desc[1:].strip()

  # Visual fix: capitalise a leading "close..." for display.
  if desc.startswith('close'):
    desc = 'C' + desc[1:]

  # Readability fix: "at Foo Rd" reads better as "Closed at Foo Rd".
  if desc.startswith('at '):
    desc = 'Closed ' + desc

  return desc

def fix(str):
  """Strip scraped-markup noise from a text fragment.

  Removes stray tags and entities, drops apostrophes (which would break the
  FT insert query), and trims surrounding whitespace.
  """
  cleaned = str
  for junk, replacement in (('<br />', ''),
                            ('&nbsp;', ' '),
                            ('<span style="color: #000000;">', ''),
                            ('</span>', ''),
                            ("'", '')):
    cleaned = cleaned.replace(junk, replacement)
  return cleaned.strip()

def print_closure(road, desc):
  print '"' + road + ', Victoria, Australia","' + road + '","' + str(now) + '","' + str(desc) + '"'

def scrapyscrapy():
  """Scrape the VicRoads flood-alerts page and print closures as CSV rows."""
  soup = BS(urllib.urlopen("http://mobiletraffic.vicroads.vic.gov.au/floodalerts/").read())

  for strong in soup.fetch('strong'):

    # hack: some <strong> tags pack several road entries separated by <br />,
    # in which case .string is None and we must split the raw contents.
    if strong.string is None:
      for piece in strong.renderContents().split('<br />'):
        parsed = extract_street(piece)
        if parsed is not None:
          print_closure(parsed[0], None)
      continue

    parsed = extract_street(strong.string)
    if parsed is None:
      continue

    (road, desc) = parsed

    # "standard" case: the description is the text node after the tag
    if desc is None:
      desc = extract_desc(strong.findNextSibling(text = True))

    # more hacks: a description starting with ',' is really a region suffix
    # that belongs on the road name, not a description
    if desc is not None and desc.startswith(','):
      road = road + desc
      desc = None

    print_closure(road, desc)

# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
  scrapyscrapy()
