from __future__ import with_statement
from httplib2 import Http
import logging
import lxml.html
import os
import simplejson
import re
import sys
import time

# Shared HTTP client and the UW infocour schedule-query endpoint (POSTed to by grab()).
H = Http()
URI = 'http://www.adm.uwaterloo.ca/cgi-bin/cgiwrap/infocour/salook.pl'

def grab(session, subject, course, level='under'):
    """Fetch the raw schedule HTML for one course from the UW course browser.

    Parameters:
        session: term/session code passed as the 'sess' form field.
        subject: subject code, e.g. 'CS'.
        course:  catalog number, e.g. '241'.
        level:   'under' (default) for undergraduate; presumably 'grad'
                 for graduate queries -- confirm against the form.

    Returns the response body (HTML) as a string.
    Raises RuntimeError if the server does not answer with HTTP 200.
    """
    query = (
        'level=%s' % level,
        'sess=%s' % session,
        'subject=%s' % subject,
        'cournum=%s' % course
    )
    response, content = H.request(URI, method='POST', body='&'.join(query))
    if response.status != 200:
        # Raising a string literal is a TypeError on modern Pythons;
        # use a real exception type.
        raise RuntimeError(
            "Could not retrieve course data (Error: %s)" % response.status)

    return content

def parse(html):
    """Parse the schedule-query result page into a list of section dicts.

    Returns None when the page reports no matches.  Otherwise returns a
    list with one dict per section row, keyed by HEADERS plus the
    course-level COURSE_HEADERS fields; the 'Time Days/Date' value is
    replaced by the (days, start, end) tuple from parse_date().
    """
    COURSE_HEADERS = (
        "Subject",
        "Catalog",
        "Units",
        "Title",
    )

    HEADERS = (
        "Class",
        "Comp Sec",
        "Camp Loc",
        "Assoc. Class",
        "Rel 1",
        "Rel 2",
        "Enrl Cap",
        "Enrl Tot",
        "Wait Cap",
        "Wait Tot",
        "Time Days/Date",
        "Bldg Room",
        "Instructor",
    )

    ERROR_STRING = "Sorry, but your query had no matches."

    def dict_maker(extra_info):
        """Return a converter turning one section <tr> into a dict merged with extra_info."""
        def row_to_dict(row):
            def clean(cell):
                # NOTE(review): compares against the literal '&nbsp' (no
                # trailing ';'); lxml usually decodes the entity to
                # u'\xa0', so confirm this branch is ever taken.
                return '' if cell == '&nbsp' else cell

            cells = [clean((td.text or '').strip()) for td in row.findall('td')]
            result = dict(zip(HEADERS, cells))
            result.update(extra_info)
            result['Time Days/Date'] = parse_date(result['Time Days/Date'])
            return result
        return row_to_dict

    # Bail if we have a bad query
    if ERROR_STRING in html:
        return None

    doc = lxml.html.fromstring(html)
    info_row = doc.xpath('//table')[0].findall('tr')[1].findall('td')
    # Guard against empty cells (text is None), matching the per-row handling above.
    info = [(col.text or '').strip() for col in info_row]
    course_info = dict(zip(COURSE_HEADERS, info))

    # Comprehensions instead of map/filter: clearer, and yields a real
    # list on Python 3 as well (simplejson.dump cannot serialize a map object).
    make_section = dict_maker(course_info)
    rows = doc.xpath('//table//table')[0].findall('tr')[1:]
    return [make_section(row) for row in rows if len(row) > 11]

def parse_date(date_string):
    """Parse a 'HH:MM-HH:MMDays' string into (days, start_time, end_time).

    Times come back as integers in 24-hour HHMM form.  The schedule uses
    a 12-hour clock with no AM/PM marker, so hours below 8 are assumed
    to be afternoon and shifted by 12.  Days is a list such as
    ['M', 'W', 'F'], with 'Th' (Thursday) kept distinct from 'T'
    (Tuesday).

    Returns None for 'TBA' entries and for strings that do not match the
    expected format (previously this crashed with AttributeError on a
    failed regex match).
    """
    def hourize(hour):
        # No AM/PM in the source data: anything before 8 must be PM.
        return hour + 12 if hour < 8 else hour

    if 'TBA' in date_string:
        return None

    date_pattern = re.compile(
        r'(?P<start_hour>\d{2}):(?P<start_minute>\d{2})'
        r'-(?P<end_hour>\d{2}):(?P<end_minute>\d{2})(?P<days>\w*)')
    match = date_pattern.match(date_string)
    if match is None:
        # Malformed time string: treat it like TBA rather than crashing.
        return None
    groups = match.groupdict()

    # 'T(?!h)' stops a bare 'T' from swallowing the 'T' of 'Th'.
    days = re.findall(r'(M|T(?!h)|W|Th|F)', groups['days'])
    start_time = hourize(int(groups['start_hour'])) * 100 + int(groups['start_minute'])
    end_time = hourize(int(groups['end_hour'])) * 100 + int(groups['end_minute'])

    return (days, start_time, end_time)

def main():
    """Scrape schedules for every course listed in the input file.

    Usage: script <course_list_file> <session_code>

    Reads one course code per line (e.g. 'CS241'), fetches and parses
    each course's schedule for the given session, and writes the
    combined result to <session_code>.json.
    """
    # Splits a code like 'CS241' into subject ('CS') and catalog number ('241').
    pattern = re.compile(r'([A-Z]*)(\d*)')

    with open(sys.argv[1], 'r') as f_in:
        courses = [line.strip() for line in f_in]

    output = {}
    for c in courses:
        # logging.warn() is a deprecated alias; warning() is the supported name.
        logging.warning(c)
        subject, code = pattern.findall(c)[0]
        output[c] = parse(grab(sys.argv[2], subject, code))
        time.sleep(0.5)  # throttle so we don't hammer the server

    outfile = "%s.json" % sys.argv[2]
    with open(outfile, 'w') as sched_out:
        simplejson.dump(output, sched_out, sort_keys=True, indent=2)

if __name__ == '__main__':
    main()
