# Standard Python library imports
import getopt
from datetime import datetime, time
from urllib2 import urlopen

# 3rd party library imports
from BeautifulSoup import BeautifulSoup

# Module-level scrape state, shared by the functions below.
recordCount = 0  # We start at zero and increment for each page scrape
parcelIds = []  # Holds the parcel IDs of the records to be scraped

def retrieveParcelIds(idList):
    """Populate the module-level parcel ID list from *idList*.

    TODO: not yet implemented — stub only. Presumably intended to fill
    the module-level ``parcelIds`` list; confirm against the caller once
    written.
    """
    pass

def retrieveHTMLPage():
    """Fetch a property-detail HTML page from the Maryland SDAT site.

    TODO: not yet implemented — stub only. The two URL fragments below
    are defined but never used yet; presumably they will be concatenated
    with an account number and passed to ``urlopen``.
    """
    # Base query URL for the SDAT record-details endpoint.
    sdatBaseAddress = "http://sdatcert3.resiusa.org/rp_rewrite/details.aspx?"
    # Fixed query-string prefix (county/district); account number is appended.
    sdatCountyField = "County=01&SearchType=ACCT&District=29&AccountNumber="
    pass

def parseHTMLPage():
    """Parse a fetched SDAT HTML page.

    TODO: not yet implemented — stub only. The commented-out
    BeautifulSoup experiment further down suggests this will walk
    ``table`` elements; confirm once implemented.
    """
    pass

def writeDatabaseRecord():
    """Persist one scraped record to the database.

    TODO: not yet implemented — stub only.
    """
    pass

def timeStamp():
    """Return the current local date and time as a pair of strings.

    Returns:
        tuple: ``(date_str, time_str)`` where ``date_str`` is formatted
        as ``'YYYY-MM-DD'`` and ``time_str`` as ``'HH:MM:SS AM/PM'``.
    """
    # The original body was a syntax error (dangling indented line after
    # `return expr,`) and called strftime on the datetime class with no
    # instance. Take one `now()` snapshot so both strings agree.
    now = datetime.now()
    return (now.strftime('%Y-%m-%d'), now.strftime('%I:%M:%S %p'))

def checkScheduleTime():
    """Check whether the scraper should run at the current time.

    TODO: not yet implemented — stub only.
    """
    pass

#for ul in soup.html.body.findAll("table",attrs={"cellspacing" : "0","border" : "0"},recursive=True):
#    #print ul.findNext().renderContents() ul.findNext().find('td').renderContents()
#    print ul.findNext().renderContents()

def main():
    # Our main function
    print 'Starting Web scrape on %s at %s' % ()
     
# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
