import urllib
import urlparse
from BeautifulSoup import BeautifulSoup
import json
import Queue
import threading
import time
import sys
import re

# Master schedule listing; p_term=SP is presumably the Spring term and
# p_list_all=Y asks for every class in one page.
ROOT_URL = 'http://osoc.berkeley.edu/OSOC/osoc?p_term=SP&p_list_all=Y'
# Pulls the description cell out of a general-catalog search result page
# (used by get_class_description).
catalog_re = re.compile('<TD><FONT SIZE="-1"><B>Description: </B>(.*?)</FONT></TD>')
# Search endpoint of the general catalog; POSTed to by get_class_description.
GENERAL_CATALOG_URL = 'http://osoc.berkeley.edu/catalog/gcc_search_sends_request'

def get_root(url):
  """Fetch `url` and parse the response into a BeautifulSoup tree.

  Returns None if the fetch or the parse fails for any reason; callers
  must be prepared for a None result.
  """
  try:
    conn = urllib.urlopen(url)
    try:
      return BeautifulSoup(conn.read())
    finally:
      conn.close()  # fix: the urlopen handle was previously never closed
  except Exception:
    # Best-effort scrape: any network/parse error degrades to "no page".
    return None
    

def get_table(root):
  """Collect every <table> element underneath `root` (a BeautifulSoup node)."""
  return root.findAll('table')

def get_entries(root):
  """Only use with root_url: locate the schedule table and return its rows.

  The schedule listing is the single table with cellspacing=0/cellpadding=2;
  every <tr> inside it is returned.
  """
  schedule = root.find('table', cellspacing='0', cellpadding='2')
  return schedule.findAll('tr')

def get_text_from_tr(tr):
  """Only use with root_url, returns either:
  [True, major name]                       (rows with one cell)
  [False, [major short name, class short name, class long name, url]]
                                           (rows with three cells)
  or None for any other cell count (e.g. table headers)."""
  tds = tr.findAll('td')
  if len(tds) == 1:
    return [True, tds[0].text.strip().replace('&#160;', '')]
  elif len(tds) == 3:
    # NOTE(review): this branch strips '&nbsp;' while the 1-cell branch
    # strips '&#160;' -- possibly intentional, confirm against live pages.
    info = [td.text.strip().replace('&nbsp;', '') for td in tds]
    # They switched to using javascript for some reason; rebuild the search
    # URL from the jQuery $('#field').val('value') assignments in onclick.
    if not tds[0].label:
      url = None
    else:
      onclick = tds[0].label['onclick']
      if onclick:
        # Raw string: the old literal relied on unrecognized escapes
        # (\$, \(, \#) surviving in a plain string, which newer Pythons
        # reject; the compiled pattern is unchanged.
        entries = dict(re.findall(
            r'\$\(\'\#([a-z_]+)\'\)\.val\(\'([A-Za-z0-9 ,.;_]+)\'\)',
            onclick))
        entries['p_term'] = 'SP'
        entries['p_print_flag'] = 'N'
        entries['p_list_all'] = 'N'
        url = 'http://osoc.berkeley.edu/OSOC/osoc?' + urllib.urlencode(entries)
      else:
        url = None
    info.append(url)
    return [False, info]

def process_trs(tr):
  """Returns [ majors, classes ] where
  majors = (shortname, longname) and
  classes = (major_shortname, class_shortname, class_longname, url)"""
  current_major = None
  major_key = {}
  classes = []
  for item in tr:
    entry = get_text_from_tr(item)
    # Major long name
    if entry[0] is True:
      current_major = entry[1]
    elif entry[0] is False:
      major_short, class_short, class_long, url = entry[1]
      if not current_major: continue  # header, we ignore this
      if major_short not in major_key:
        major_key[major_short] = current_major
      class_key = '%s - %s' % (major_short, class_short)
      print class_key
      classes.append(entry[1])
  return [ major_key.items(), classes ]
  
def process_class_page(url):
  """Given a url to a search result page, returns a list of dicts.

  Each dict maps a label cell (e.g. 'Course Title:') to its value cell,
  one dict per table on the page. Returns [] if the page could not be
  fetched."""
  root = get_root(url)
  # fix: get_root returns None on fetch failure, which previously crashed
  # the findAll call with an AttributeError
  if root is None:
    return []
  results = []
  for table in get_table(root):
    item = {}
    for tr in table.findAll('tr'):
      cells = tr.findAll('td', nowrap=True)
      if not cells:  # Not an entry
        continue
      name = cells[0].text.strip().replace('&#160;', '')
      if len(cells) == 2:
        value = cells[1].text.strip().replace('&#160;', '')
      else:
        value = None
      item[name] = value
    if item:
      results.append(item)
  return results
  
def _process_thread(queue, results, total):
  while True:
    try:
      major, short, long, url = queue.get(False)
      results.inc()
      print '%s %s\t(%2d%%)' % (major, short, (results.cur()*100/total))
      result = [major, short, process_class_page(url)]
      # if results.cur() > 50: return  # TODO: REMOVE ME
      results.append(result)
    except Queue.Empty:
      return

class sync_list(object):
  """A list plus a progress counter, both safe to share across threads.

  The list and the counter use separate locks so appending results never
  blocks progress reporting.
  """

  def __init__(self):
    self.lst = list()             # accumulated results
    self.lock = threading.Lock()  # guards self.lst
    self.inc_lock = threading.Lock()  # guards self.current
    self.current = 0              # number of items processed so far

  def inc(self):
    """Atomically advance the progress counter."""
    with self.inc_lock:
      self.current += 1

  def cur(self):
    """Return the current progress count."""
    with self.inc_lock:
      return self.current

  def append(self, item):
    """Thread-safely append `item` to the list."""
    with self.lock:
      self.lst.append(item)

  def pop(self):
    """Thread-safely remove and return the last item.

    Bug fix: this used `self.list.pop()`, which raised AttributeError
    (the attribute is `self.lst`), and discarded the popped value.
    """
    with self.lock:
      return self.lst.pop()
      
def run_get_classes(classes, num_threads):
  """Fetch every class page in parallel with `num_threads` workers.

  `classes` is a sequence of 5-tuples (major shortname, code, long name,
  url, description); the description is dropped before queueing. Returns
  the accumulated list of [major, code, page dicts] results.
  """
  work = Queue.Queue()
  for major_shortname, code, class_longname, url, _description in classes:
    work.put((major_shortname, code, class_longname, url))
  results = sync_list()
  total = len(classes)
  workers = [threading.Thread(target=_process_thread, args=[work, results, total])
             for _ in range(num_threads)]
  for worker in workers:
    worker.start()
  for worker in workers:
    worker.join()
  return results.lst
  
def parse_course(course):
  """Split a schedule 'Course:' field into its components.

  Input: string like 'AFRICAN AMERICAN STUDIES 240 P 002 LEC'
  Output: [course name, primary (bool), section number, section type],
  e.g. ['AFRICAN AMERICAN STUDIES 240', True, '002', 'LEC'].
  """
  tokens = course.split(' ')
  section_type = tokens[-1]
  section_number = tokens[-2]
  primary = tokens[-3] == 'P'  # True if primary, false if secondary
  # Bug fix: was ''.join, which glued the name into
  # 'AFRICANAMERICANSTUDIES240' instead of keeping the original spacing.
  course = ' '.join(tokens[:-3])
  return [course, primary, section_number, section_type]
  
def parse_timeloc(time_location):
  """Split a 'Location:' field into [time, location].

  'Th 1030-1130A, 100 2232 PIEDMNT' -> ['Th 1030-1130A', '100 2232 PIEDMNT']
  'TBA'                             -> [None, None]
  'UNSCHED...'                      -> [None, remainder of the string]
  'Th 1030-1130A' (no location)     -> ['Th 1030-1130A', None]
  empty/None                        -> [None, None]
  Anything else is returned doubled as a last resort.
  """
  if not time_location:
    return [None, None]
  parts = time_location.split(', ')
  # Order matters: a two-part split wins even over an UNSCHED prefix.
  if len(parts) == 2:
    return parts
  if time_location.startswith('UNSCHED'):
    return [None, time_location.replace('UNSCHED', '').rstrip()]
  if len(parts) == 1:
    return [None, None] if parts[0] == 'TBA' else [parts[0], None]
  return [time_location, time_location]
  
  
def process_sections(input):
  """Input: list of [major, short, {data}]
  data has the following keys that are important: 'Course Title:',
  'Instructor:', 'Location:', 'Course Control Number:', 'Status/Last Changed:',
  'Course:', 'View Books_url'
  Output: list of [major shortname, class shortname, section type, section #,
  parent, time, location, instructor, ccn, last updated, books url,
  primary (boolean)]
  Parent is a tuple of (section type, section #). Cancelled sections are
  skipped.
  """
  results = []
  # The current "Primary" section; secondaries are attached to it.
  # NOTE(review): not reset between classes, so a class whose first row is
  # a secondary would inherit the previous class's primary -- confirm the
  # schedule data always lists a primary first.
  cur = None
  # Iterating through all the classes
  for major, short, all_classes in input:
    for data in all_classes:
      instructor = data['Instructor:']
      time_location = data['Location:']
      ccn = data['Course Control Number:']
      last_changed = data['Status/Last Changed:']
      major_course_name = data['Course:']
      # Not every row has a books link; default to None.
      book_url = data.get('View Books_url')
      if time_location == 'CANCELLED':
        continue
      # The parsed course name is unused here; major/short already carry it.
      _course, primary, section_number, section_type = parse_course(
          major_course_name)
      if primary:
        cur = (section_type, section_number)
        parent = None
      else:
        parent = cur
      section_time, section_location = parse_timeloc(time_location)
      result = [major, short, section_type, section_number, parent,
          section_time, section_location, instructor, ccn, last_changed,
          book_url, primary]
      results.append(result)
  return results

def get_class_description(classes):
  results = []
  for _class in classes:
    major_shortname, code, class_longname, url = _class
    # Fetch a description from the general catalog
    data = {'p_dept_cd': major_shortname, 'p_title': '', 'p_number': code}
    try:
      u = urllib.urlopen(GENERAL_CATALOG_URL, urllib.urlencode(data))
      print 'Fetching description for %s %s...' % (major_shortname, code),
    except Exception, e:
      print 'failed %s' % str(e)
      results.append((major_shortname, code, class_longname, ''))
      continue
    result = catalog_re.search(u.read())
    if result:
      description = result.group(1)
    if not result:
      description = 'N/A'
    results.append((major_shortname, code, class_longname, url, description))
    print 'success'
  return results

      
def main():
  """Run the full scrape: majors -> classes -> class pages -> sections.

  Writes majors.json, classes.json, results.json (the raw per-class page
  data, reusable via just_process), sections.json, and a timing log.
  """
  # Get the root DOM object in the master schedule list
  initial_root = get_root(ROOT_URL)
  # Get a list of 'tr' nodes
  rows = get_entries(initial_root)
  # Majors is complete, we need to parse classes further
  majors, classes = process_trs(rows)
  classes = get_class_description(classes)
  with open('majors.json', 'w') as fd:
    json.dump(majors, fd)
  with open('classes.json', 'w') as fd:
    json.dump(classes, fd)
  t1 = time.time()
  results = run_get_classes(classes, 10)
  t2 = time.time()
  with open('log.txt', 'w') as fd:
    fd.write('Time elapsed: %f' % (t2 - t1))
  # Store this as an intermediary so just_process can rerun the parsing
  # without re-scraping.
  with open('results.json', 'w') as fd:
    json.dump(results, fd)
  # Results is a list of lists as: (major, short, result)
  sections = process_sections(results)
  with open('sections.json', 'w') as fd:
    json.dump(sections, fd)
    
def just_process(filename):
  """Rebuild sections.json from a previously saved results.json.

  `filename` is a JSON file of [major, short, page dicts] entries, as
  written by main(); skips all network fetching.
  """
  with open(filename, 'r') as fd:
    raw_results = json.load(fd)
  with open('sections.json', 'w') as fd:
    json.dump(process_sections(raw_results), fd)
  
if __name__ == '__main__':
  # With a single argument, treat it as a saved results.json and only rerun
  # the section processing; otherwise perform the full scrape.
  if len(sys.argv) == 2:
    just_process(sys.argv[1])
  else:
    main()