import cgi
import os
import logging

from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.api.labs import taskqueue
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from django.utils import simplejson as json
from xml.etree import ElementTree

# base url for the search, parameterized by (min, max) asking price via the
# two %d slots. the cron job will find the most recent NUM_PER_INCREMENT
# listings for each $PRICE_STEP increment in the price range.
BASE_URL = "http://annarbor.craigslist.org/search/apa?query=&catAbbreviation=apa&minAsk=%d&maxAsk=%d&bedrooms=&addTwo=purrr"
# (low, high) bounds of the price search, in dollars
PRICE_RANGE = (900, 1400)
# width of each price slice; RefreshCron posts one task per slice
PRICE_STEP = 50
# max listings fetched per price slice (see CraigsRssTask)
NUM_PER_INCREMENT = 10

# this must contain a maps api key generated for the url of your site
# (e.g http://yourapp.appspot.com)
# see http://code.google.com/apis/maps/signup.html
MAPS_API_KEY = "ABQIAAAAyitYzLC9zbk8k_Teq6152hStqFlVZDnCQQRDCUTkx15folywRxSxoWMJuzJfdrbx_u9uipBTJ9giWA"

class MainPage(webapp.RequestHandler):
  """Renders index.html, passing the maps API key and draining status."""
  def get(self):
    values = {
        "mapskey": MAPS_API_KEY,
        "draining": drainingTasks(),
    }
    page = os.path.join(os.path.dirname(__file__), 'index.html')
    self.response.out.write(template.render(page, values))

class Listings(webapp.RequestHandler):
  """Handles the ajax request for listings, writing them out as JSON.

  The serialized result is cached in memcache for 60 seconds so repeat
  requests don't re-query the datastore.
  """
  def get(self):
    jsonlistings = memcache.get("jsonlistings")
    if jsonlistings is None:
      # LIMIT 60 bounds the payload; newest-first so fresh postings win.
      # (renamed from `all`, which shadowed the builtin)
      listings = db.GqlQuery(
          "SELECT * FROM ListingStore ORDER BY created DESC LIMIT 60")
      dictlistings = map(todict, listings)
      jsonlistings = json.dumps(dictlistings)
      # don't cache an empty result -- a refresh may be in flight
      if len(dictlistings) > 0:
        if not memcache.add("jsonlistings", jsonlistings, 60):
          logging.error("Memcache add failed.")
    self.response.out.write(jsonlistings)
    
class DrainTasks(webapp.RequestHandler):
  """Toggles task draining and renders the resulting state."""
  def get(self):
    was_draining = memcache.get("draintasks") is not None
    if not was_draining:
      # entering drain mode: also flush the cached listings JSON
      memcache.delete("jsonlistings")
      memcache.add("draintasks", 1, 3600)
    else:
      memcache.delete("draintasks")
    template_values = {"draining": not was_draining}
    path = os.path.join(os.path.dirname(__file__), 'drainingtasks.html')
    self.response.out.write(template.render(path, template_values))

def drainingTasks():
  """Returns True when the admin has enabled task draining (see DrainTasks)."""
  flag = memcache.get("draintasks")
  return flag is not None

class RefreshCron(webapp.RequestHandler):
  """Cron job that refreshes listings for the housing url, firing off
  a task for each PRICE_STEP increment within the range."""
  def get(self):
    # lazy import: pickle is only needed by the cron/task handlers
    import pickle
    # clear out listings, cache
    memcache.delete("jsonlistings")
    all_keys = ListingStore.all(keys_only=True)
    # NOTE(review): deletes at most 100 entities per run; if more than
    # 100 listings ever accumulate, older rows would linger -- presumably
    # acceptable given the 60-listing display cap, but confirm.
    db.delete(all_keys.fetch(limit=100))

    if drainingTasks():
      logging.info("skipping posting of CraigsRssTasks because we are draining.")
      return

    # one task per PRICE_STEP slice; "price + 1" keeps adjacent slices
    # from overlapping at their shared boundary price.
    for price in xrange(PRICE_RANGE[0], PRICE_RANGE[1], PRICE_STEP):
      url = BASE_URL % (price + 1, price + PRICE_STEP)
      taskqueue.add(
        url="/task/craigsrss",
        payload=pickle.dumps(url),
        headers={"Content-Type": "application/octet-stream"})
    self.redirect("/")

class CraigsRssTask(webapp.RequestHandler):
  """Takes in a url to a craigslist rss feed (pickled in the request
  body) and grabs each entry, firing off a DetailsTask to get the
  detail of each listing."""
  def post(self):
    if drainingTasks():
      logging.info("skipping CraigsRssTask because we are draining.")
      return
    import pickle
    # payload is produced by our own RefreshCron, so unpickling is
    # internal-only data, not untrusted input
    feed_url = pickle.loads(self.request.body) + "&format=rss"
    if memcache.get(feed_url) is not None:
        logging.info("skipping CraigsRssTask for %s because it has already completed in past hour" % feed_url)
        return
    entries = parseListings(fetchContent(feed_url))

    logging.info("fetched %d listings from '%s'" % (len(entries), feed_url))
    for entry in entries[:NUM_PER_INCREMENT]:
      taskqueue.add(
        url='/task/fetchdetails',
        payload=pickle.dumps(entry),
        headers={"Content-Type": "application/octet-stream"})
    # remember that we've done this to be somewhat idempotent
    # (reposting details tasks is problematic, and app engine task queues
    #  will sometimes re-execute successfully completed tasks)
    memcache.add(feed_url, 1, 3600)
    self.redirect("/")

class DetailsTask(webapp.RequestHandler):
  """Fetches details from one listing and stores the full listing in
  the datastore. The listing arrives in the request body as a pickled
  dictionary (see CraigsRssTask)."""
  def post(self):
    if drainingTasks():
      logging.info("skipping DetailsTask because we are draining.")
      return
    import pickle
    entry = pickle.loads(self.request.body)
    listing_url = entry["url"]

    page = fetchContent(listing_url)
    addr = extractAddr(page)
    if addr is None:
      logging.info("couldn't extract address from " + listing_url)
      return
    pics = extractPhotos(page)
    latitude, longitude = parseGeocode(fetchContent(geocodeUrl(addr)))
    if latitude is None:
      logging.info("couldn't geocode address of '%s' from %s" % (addr, listing_url))
      return
    ListingStore(
      title=entry["title"],
      url=listing_url,
      address=addr,
      photos=pics,
      lat=latitude,
      longit=longitude).put()

class ListingStore(db.Model):
  "The model for a housing listing"
  title = db.StringProperty(required=True)      # listing headline from the RSS feed
  url = db.StringProperty(required=True)        # craigslist detail-page url
  address = db.StringProperty(required=True)    # street address scraped from the page
  lat = db.StringProperty(required=True)        # latitude as a string; todict maps "" to None
  longit = db.StringProperty(required=True)     # longitude as a string; todict maps "" to None
  photos = db.StringListProperty(required=True) # jpg urls scraped from the page
  created = db.DateTimeProperty(auto_now_add=True)  # insert time; listings served newest-first

def todict(listingStored):
  """Converts a ListingStore object into a dictionary (the format
  that is passed between tasks, and that is amenable to JSON conversion).

  Empty-string coordinates are normalized to None so the client can
  tell "not geocoded" apart from a real position.
  """
  # fix: the original tested `len(x) is 0`, an identity comparison on an
  # int that only works by CPython's small-int caching; use truthiness.
  lat = listingStored.lat or None
  longit = listingStored.longit or None
  return makeListing(
    address=listingStored.address,
    lat=lat,
    longit=longit,
    title=listingStored.title,
    url=listingStored.url,
    photos=listingStored.photos)

def makeListing(address, title, url, description=None, date=None, lat=None, longit=None, photos=None):
  """Builds a dictionary of listing attributes, defaulting unspecified
  ones to None (uncoded_reason always starts empty)."""
  listing = {
    "address": address,
    "title": title,
    "url": url,
    "description": description,
    "date": date,
    "uncoded_reason": "",
    "photos": photos,
    "lat": lat,
    "longit": longit,
  }
  return listing

def fetchContent(url):
  """Fetches url with urlfetch and returns the response body.

  Raises an Exception on any non-200 status code.
  """
  result = urlfetch.fetch(url)
  if result.status_code != 200:
    # fix: the original raised a bare string, which is a TypeError in
    # Python 2.6+ (string exceptions were removed); raise a real exception.
    raise Exception("bogus response code %d" % result.status_code)
  return result.content

# XML namespaces used when parsing the craigslist RSS feed:
# NS is the RSS 1.0 namespace (item elements), DC_NS is the Dublin Core
# metadata namespace (title/date/source children).
NS = "http://purl.org/rss/1.0/"
DC_NS = "http://purl.org/dc/elements/1.1/"

def parseListings(content):
  """Parses the initial details of the listings from text that
  contains an RSS feed from a craigslist query."""

  def childText(item, tag):
    "Returns the text of the item's 'dc:'-namespaced child, or '' if absent."
    node = item.find("./{%s}%s" % (DC_NS, tag))
    return node.text if node is not None else ""

  root = ElementTree.fromstring(content)
  return [
    makeListing(
      address="",
      title=childText(item, "title"),
      date=childText(item, "date"),
      url=childText(item, "source"))
    for item in root.findall(".//{%s}item" % NS)]

def extractAddr(text):
  """Extracts the address from the text of a listing.

  Craigslist embeds the address as a google maps link; returns the
  unquoted query portion of the first such link, or None if absent.
  """
  import urllib
  import re
  # fix: [^"]* instead of greedy .* -- the greedy form could swallow past
  # the closing quote up to a later quote on the same line.
  elements = re.findall(r"href=\"http://maps\.google\.com/\?q=([^\"]*)\"", text)
  if len(elements) < 1:
    return None
  return urllib.unquote_plus(elements[0])

def extractPhotos(text):
  """Extracts a list of photo urls (jpg) from a listing.

  Matches each img tag's src attribute individually. The original greedy
  pattern (.*jpg) could merge several images on one line into a single
  bogus url; [^>] / [^"] keep each match inside one tag and one attribute.
  (Also moved the docstring above the import so it is a real docstring.)
  """
  import re
  return re.findall(r"img[^>]*src=\"([^\"]*jpg)\"", text)

def geocodeUrl(address):
  """Builds the google maps geocoding api url for an address.
  address: a string with an address
  returns: a url whose response is csv, per the google maps documentation"""
  import urllib
  params = urllib.urlencode({
      "q": address,
      "key": MAPS_API_KEY,
      "sensor": "false",
      "output": "csv",
      "oe": "utf8"})
  return "http://maps.google.com/maps/geo?" + params

def parseGeocode(response):
  """Parses the response from the geocoding request, making sure
  the response code is as expected, logging an error otherwise.

  response: a csv of status, accuracy, latitude and longitude
  returns: (latitude, longitude) as strings, or (None, None)
           on failure.
  """
  status, _accuracy, lat, longit = response.split(",")
  if int(status) == 200:
    return lat, longit
  logging.info("geocoding failed with status " + status)
  return None, None
        
# URL routing: public pages ('/', '/listings'), the cron entry point,
# the two task-queue handlers, and the admin toggle for draining tasks.
application = webapp.WSGIApplication([
    ('/', MainPage),
    ("/listings", Listings),
    ("/cron/refreshlistings", RefreshCron),
    ("/task/fetchdetails", DetailsTask),
    ("/task/craigsrss", CraigsRssTask),
    ("/admin/draintasks", DrainTasks)
    ],
    debug=True
  )

def main():
  "Entry point: runs the WSGI application."
  run_wsgi_app(application)

if __name__ == "__main__":
  main()
