# Python imports
import logging
import urllib
from datetime import datetime, timedelta
import xml.sax

# AppEngine imports
from google.appengine.api import urlfetch
from google.appengine.api import memcache

# Django imports
from django.conf import settings
from django.utils import simplejson

# Local imports 
import views
import dateutil.parser
import models

# Tuning knobs for feed fetching and caching.
MAX_FETCHES = 2                           # Only fetch 2 feeds per request
MAX_FEED_AGE = timedelta(seconds=3600)    # Check feeds every hour
MAX_FEED_MEMCACHE_SECONDS = 604800        # Store feeds in memcache for a week
MAX_ENTRIES = 15                          # Return only 15 most recent entries

class AtomFeed(object):
  """
  Creates lists of blog entries from ATOM compliant feeds.

  Parsed feeds are cached in memcache for MAX_FEED_MEMCACHE_SECONDS and
  treated as fresh for MAX_FEED_AGE; at most MAX_FETCHES network fetches
  are performed per instance (i.e. per request).
  """
  # Per-instance fetch counter; initialised to 0 in __init__.
  num_fetches = None

  def __init__(self):
    self.num_fetches = 0

  def get_by_url(self, url, label=None):
    """
    Attempts to retrieve the feed at the specified URL and return a feed
    dict with "last_updated" and "entries" keys.

    Args:
      url: the feed URL to fetch.
      label: memcache key suffix; defaults to the URL itself.

    Returns:
      The freshest available feed dict, a stale cached copy if the fetch
      budget is exhausted or the fetch fails, or None if the feed has
      never been fetched successfully.
    """
    if label is None:
      label = url
    key_name = "feed|%s" % label
    feed = memcache.get(key_name)

    if feed is None:
      logging.info("Could not find BlogFeed [%s] in memcache", key_name)
      # Make the (missing) feed look already expired to force a fetch.
      feed_expiration = datetime.now() + timedelta(seconds=-1)
    else:
      logging.info("Got BlogFeed [%s] from memcache", key_name)
      feed_last_updated = feed.get('last_updated', datetime.now())
      feed_expiration = feed_last_updated + MAX_FEED_AGE

    if feed_expiration < datetime.now() and self.num_fetches < MAX_FETCHES:
      logging.info("Fetching [%s]", url)
      self.num_fetches = self.num_fetches + 1
      result = urlfetch.fetch(url)
      if result.status_code == 200:
        feed = self.parse_feed(result.content)
        if feed is not None:
          memcache.set(key_name, feed, MAX_FEED_MEMCACHE_SECONDS)
      # On a non-200 response we fall through and return the stale
      # cached feed (or None) rather than raising.

    return feed

  def parse_feed(self, content):
    """Parse raw Atom XML into {"last_updated", "entries"}."""
    handler = AtomEntriesHandler()
    # xml.sax.parseString creates its own parser internally; the old
    # make_parser()/setContentHandler() pair here was dead code.
    xml.sax.parseString(content, handler)

    return {
        "last_updated" : datetime.now(),
        "entries" : handler.entries,
    }
  

class AtomEntriesHandler(xml.sax.handler.ContentHandler):
  """SAX handler that collects Atom <entry> elements into dictionaries.

  Each entry dict may carry: "feed_title", "feed_url", "title", "url",
  "content" (from <content>, falling back to <summary>), "updated" and
  "updated_datetime".  Completed entries accumulate in self.entries.
  """

  def __init__(self):
    # Key in self.entry (or the sentinel "feed_title") that incoming
    # character data is appended to; None between interesting elements.
    self.current_value = None
    self.entries = []        # Completed entry dicts, in document order.
    self.entry = None        # Dict for the <entry> being parsed, if any.
    self.feed_title = ""
    self.feed_url = ""
    self.has_content = False # True once the current entry saw <content>.

  def startElement(self, name, attributes):
    if self.entry is not None:
      if name == "title":
        self.current_value = "title"
      elif name == "link" and attributes.get("rel", "") == "alternate":
        self.entry["url"] = attributes.get("href", "")
      elif name == "content":
        self.has_content = True
        self.current_value = "content"
      elif name == "summary":
        # <summary> only supplies "content" when no <content> was seen.
        if self.has_content == False:
          self.current_value = "content"
      elif name == "updated":
        self.current_value = "updated"
    else:
      if name == "entry":
        # BUG FIX: reset per-entry state here.  Previously has_content
        # leaked from one entry to the next, so an entry with only a
        # <summary> following an entry with <content> lost its text.
        self.has_content = False
        self.entry = {
          "feed_title" : self.feed_title,
          "feed_url" : self.feed_url,
        }
      elif name == "title":
        self.current_value = "feed_title"
      elif name == "link" and attributes.get("rel", "") == "alternate":
        self.feed_url = attributes.get("href", "")

  def characters(self, data):
    # SAX may deliver text in several chunks, so always append.
    if self.current_value == "feed_title":
      self.feed_title += data
    elif self.entry is not None and self.current_value is not None:
      old_value = self.entry.get(self.current_value, "")
      self.entry[self.current_value] = old_value + data

  def endElement(self, name):
    if name == "entry":
      self.entries.append(self.entry)
      self.entry = None
    elif (name == "title" or
          name == "link" or
          name == "content" or
          name == "summary"):
      self.current_value = None
    elif name == "updated":
      self.current_value = None
      if self.entry is not None:
        # Keep a parsed datetime alongside the raw <updated> string so
        # entries can be sorted and aged later.
        updated_time = dateutil.parser.parse(self.entry.get("updated", ""))
        self.entry["updated_datetime"] = updated_time

def sort_entries_by_age(x, y):
  """cmp-style comparator ordering entry dicts newest-first.

  Compares the "updated_datetime" values of two entries, returning -1
  when x is newer than y, 1 when older, and 0 when equal, for use with
  Python 2's list.sort(cmp).
  """
  x_time = x.get("updated_datetime")
  y_time = y.get("updated_datetime")
  if x_time == y_time:
    return 0
  return -1 if x_time > y_time else 1
    
def process_entry_for_output(entry):
  """Replace an entry's parsed timestamp with a human-readable age.

  Removes "updated_datetime" (when present) and adds "updated_delta",
  e.g. "about 3 hours ago".  Mutates and returns the same entry dict.
  """
  now = datetime.now(models.UTC())
  # pop() with a default fixes a KeyError: the old code used
  # entry.get(..., default) but then unconditionally did
  # del entry["updated_datetime"], which blew up when the key was absent.
  time_updated = entry.pop("updated_datetime", now)
  time_delta = now - time_updated
  time_str = pretty_time(time_delta.days * 86400 + time_delta.seconds)
  entry["updated_delta"] = time_str
  return entry
  
def pretty_time(time):
  """Convert an age in seconds to a friendly "... ago" phrase.

  Thresholds approximate a month as 30 days; the argument is a
  non-negative number of seconds.
  """
  minute, hour, day, month = 60, 3600, 86400, 2592000
  if time < minute:
    return "less than a minute ago"
  if time < 2 * minute:
    return "about a minute ago"
  if time < hour:
    return "about %d minutes ago" % (time // minute)
  if time < 2 * hour:
    return "about an hour ago"
  if time < day:
    return "about %d hours ago" % (time // hour)
  if time < 2 * day:
    return "about a day ago"
  if time < month:
    return "about %d days ago" % (time // day)
  if time < 2 * month:
    return "about a month ago"
  return "about %d months ago" % (time // month)

def list(request):
  """Render the aggregated news page from all configured blog feeds.

  Reads every (name, url) pair in settings.BLOG_URLS, merges their
  cached/fetched entries, and renders the newest MAX_ENTRIES of them
  with relative-age strings.
  """
  feed_reader = AtomFeed()
  entries = []
  for name, url in settings.BLOG_URLS:
    feed = feed_reader.get_by_url(url, name)
    if feed is not None:
      entries.extend(feed.get("entries", []))

  # Newest first, then keep only the most recent few for display.
  entries.sort(sort_entries_by_age)
  payload = [process_entry_for_output(e) for e in entries[:MAX_ENTRIES]]

  # JSON variant kept for reference:
  #   return views.respond_json(request, status="success", payload=payload)
  return views.respond(request, "news.html", {
      "entries" : payload,
      "view" : "news",
  })