import re
from collections import defaultdict
import json
from datetime import datetime
from pprint import pprint
from spreadsheetdb import Bundle

def listaccounts(client, version=None):
  """List the UOIT page folders, keyed by username.

  Parameters:
    client - Google doc connection object
    version - if given, only folders whose version matches are returned
  Returns:
    a map of username to account folder references (each annotated with
    'username' and 'ver' entries)
  """
  accounts = {}
  for name, ref in client.listfolder(title='Science Pages').items():
    if not name.startswith("pages"):
      continue
    # Folder titles look like "pages - <username> - <version>".
    _, username, ver = [piece.strip() for piece in name.split("-")]
    if version and ver != version:
      continue
    ref['username'] = username
    ref['ver'] = ver
    accounts[username] = ref
  return accounts

def clean(s):
  """Normalize a title: lower-case it and drop every char outside [a-z0-9_]."""
  keep = re.compile(r'[a-z0-9_]+')
  return ''.join(keep.findall(s.lower()))

def cleanrecord(r, wstitle=None):
  """Normalize the year*/month* fields of a record, in place.

  Parameters:
    r - a mapping of column name -> raw cell value
    wstitle - worksheet title the record came from (currently unused;
              kept for interface compatibility with callers)
  Returns:
    the record wrapped in a Bundle; every key starting with "year" is an
    int (2-digit years are assumed to mean 2000+) or None, and every key
    starting with "month" is an int 1-12 or None.
  """
  for key in (x for x in r.keys() if x.startswith("year")):
    try:
      year = int(r[key])
      if year < 100:
        # Two-digit years are assumed to be in the 2000s.
        year += 2000
    except (TypeError, ValueError):
      # Non-numeric (or None) cell: no usable year.
      year = None
    r[key] = year

  month_names = ["jan", "feb", "mar", "apr", "may", "jun",
                 "jul", "aug", "sep", "oct", "nov", "dec"]
  for key in (x for x in r.keys() if x.startswith("month")):
    month = None
    try:
      month = int(r[key])
    except (TypeError, ValueError):
      # Not a number: try to match an English month-name prefix.
      if isinstance(r[key], str):
        try:
          month = month_names.index(r[key].lower()[:3]) + 1
        except ValueError:
          month = None
    r[key] = month

  return Bundle(r)
  
def sheetdata(client, sheets, title):
  """Fetch and clean every worksheet of the spreadsheet named *title*.

  Parameters:
    client - Google doc connection object
    sheets - map of spreadsheet title -> dict containing a 'key' entry
    title - the spreadsheet title to fetch
  Returns:
    dict mapping cleaned worksheet title -> list of cleaned records
  """
  data = dict()
  skey = sheets[title]['key']
  for (wstitle, w) in client.listsheet(skey).items():
    # List comprehension (not filter()) so len() works on Python 3 too.
    records = [rec
               for rec in (cleanrecord(r, wstitle)
                           for r in client.data(skey, w['key']))
               if rec is not None]
    data[clean(wstitle)] = records
    # Single parenthesized argument: valid under both Python 2 and 3.
    print("\t>>from %s/%s, fetched %d entries." % (title, wstitle,
        len(records)))
  return data

def loadpages(client, f, cache=None, forcehtml=False, forceload=False):
  """ Loads all the data associated with an account.
  Parameters:
    client - the Google doc connection object
    f - dict(uri=...) is the ref to the account folder
    cache - optional path of a JSON file used to cache data between runs
    forcehtml - if True, return the data even when no reload was needed
    forceload - if True, refetch from Google even when the cache is current
  Returns:
    Nested map of spreadsheet -> worksheet -> list(tuples)
    Information -> dict(property -> value)
  Raises:
    Exception - when the cache is current and forcehtml is False
  """

  prev_data = None
  if cache:
    try:
      with open(cache, "r") as cachef:
        prev_data = json.load(cachef)
    except (IOError, OSError, ValueError):
      # Missing or corrupt cache file: fall back to a full load.
      prev_data = None

  sheets = client.listfolder(uri=f['uri'])

  # Reload unless the cached copy is at least as fresh as the remote folder.
  do_load = forceload or not (
      prev_data and prev_data['updated'] >= sheets['updated'])

  if not do_load:
    data = prev_data
  else:
    data = dict()
    #
    # Information
    #
    ws = client.listsheet(sheets['Information']['key'])
    data['updated'] = sheets['updated']
    data['information'] = client.mapdata(
      sheets['Information']['key'],
      ws['info']['key'])

    for title in ['Teaching',
                  'Supervision',
                  'Funding',
                  'Awards',
                  'Research Activities',
                  'Publications',
                  'Service']:
      data[clean(title)] = sheetdata(client, sheets, title)

    # Clean up dirty data: drop publications with a missing/blank title.
    clean_pub = dict()
    for x, pubs in data['publications'].items():
      clean_pub[x] = [p for p in pubs if p['title'] and p['title'].strip()]
    data['publications'] = clean_pub

  if cache and do_load:
    try:
      with open(cache, "w") as cachef:
        json.dump(data, cachef)
    except (IOError, OSError, TypeError):
      # Best-effort cache write: never fail the load because of it.
      pass

  if (not do_load) and (not forcehtml):
    raise Exception("No load needed")

  return data
