# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module handles interfacing with PubSubHubbub using the protocol defined
at: http://code.google.com/p/pubsubhubbub/
"""

import os
#os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from google.appengine.dist import use_library
use_library('django', '1.2')

import sys
sys.path.insert(0, "")
sys.path.insert(0, "..")

import datetime
import feedparser
import logging
import string
import urllib

from common.dbclasses import MessageToMatch
from HTMLParser import HTMLParser
from pshb_common import WatchedFeed
from pshb_common import PSHBCache

from google.appengine.api import memcache
from google.appengine.api import prospective_search
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.api.app_identity import get_default_version_hostname

from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util

# Message storage is turned off in production; when True, matched documents
# are persisted by CallbackHandler.post for debugging.
STORE_MATCHED_DOC_IN_DB = False
# Maximum requested memcache age is 30 hours (value is in seconds).
MAX_AGE_IN_MEMCACHE = 30 * 60 * 60
# Number of items to display on each listing page
PAGE_SIZE = 20

class SeenEntry(db.Model):
  """Persists URLs of Entries previously seen. key_name=<entry url> """
  # When this URL was first observed; CronHandler purges records whose
  # first_seen is older than one day.
  first_seen = db.DateTimeProperty()
  # The entry's own "updated" timestamp as reported by the feed.
  updated = db.DateTimeProperty()

def ExtractFeedMetadata(feed):
  """Given a parsed feed, returns hub, URL, and title of the feed.

  Args:
    feed: A feedparser parse result.

  Returns:
    A (hub_href, feed_href, title) tuple, or (None, None, None) when the
    document is malformed or lacks either a "hub" or "self" link.
  """
  hub_href = ""
  feed_href = ""
  if feed.bozo:
    # feedparser flagged the document as malformed; nothing usable here.
    return None, None, None
  for link in feed.feed.links:
    if "rel" in link:
      if link.rel == "hub":
        hub_href = link.href
      if link.rel == "self":
        feed_href = link.href
      if feed_href and hub_href:
        break
  # .get() instead of attribute access: a feed may legally omit its title.
  title = feed.feed.get("title", "")
  if not hub_href or not feed_href:
    # Bug fix: the Title slot previously logged feed_href a second time.
    logging.warning("Missing Feed Metadata: Feed %s, Hub: %s, Title: %s" % (
                    feed_href, hub_href, title))
    return None, None, None
  return hub_href, feed_href, title


class TextExtractor(HTMLParser):
  """Simple class to extract text-only from HTML text. Clearly, this is a very
  rudimentary implementation and many improvements could be made.

  Text inside <script>, <style> and <object> elements is excluded; every
  other text segment is appended followed by a single space.
  """
  def feed(self, data):
    """Parses HTML in |data| and returns its visible text content."""
    self.text = ""
    # Nesting-depth counters for elements whose content is not visible text.
    self.script = 0
    self.style = 0
    self.object = 0
    HTMLParser.feed(self, data)
    HTMLParser.close(self)
    return self.text

  def handle_starttag(self, tag, attrs):
    if tag == "script":
      self.script += 1
    elif tag == "style":
      self.style += 1
    elif tag == "object":
      # Bug fix: this previously compared the builtin |object| to "object",
      # which is always False, so <object> content leaked into the text.
      self.object += 1

  def handle_endtag(self, tag):
    if tag == "script" and self.script:
      self.script -= 1
    elif tag == "style" and self.style:
      self.style -= 1
    elif tag == "object" and self.object:
      # Bug fix: same builtin-vs-string comparison as in handle_starttag.
      self.object -= 1

  def handle_startendtag(self, tag, attrs):
    # We need to override the default behavior: a self-closing tag must not
    # disturb the nesting counters via handle_starttag/handle_endtag.
    pass

  def handle_data(self, data):
    # Don't use data if we're not inside a text tag. (i.e. no script or style)
    # Note: Because we're not inserting hard breaks between segments, there may
    # be some incorrect phrase matching.
    non_text = self.script + self.style + self.object
    if not non_text:
      self.text += data + " "

class CronHandler(webapp.RequestHandler):
  """Handle periodic cleanup of SeenEntries. """
  def get(self):
    """Deletes SeenEntry records older than one day, in batches of 100."""
    # Find all "too-old" entries and delete them.
    cut_off = datetime.datetime.utcnow() - datetime.timedelta(days=1)
    query = SeenEntry.all(keys_only=True).filter('first_seen <', cut_off)
    delete_count = 0
    batch_size = 100
    finished = False
    while not finished:
      pending_delete = query.fetch(limit=batch_size)
      fetched_count = len(pending_delete)
      # A short batch means the query is drained; stop after this pass.
      finished = fetched_count < batch_size
      try:
        if fetched_count:
          db.delete(pending_delete)
      except Exception:
        # Bug fix: a bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; only real datastore errors should be caught.
        logging.error("Delete failed. %d SeenEntry Records." % (fetched_count))
        break
      else:
        delete_count += fetched_count
        logging.info("Deleted %d SeenEntry Records." % (fetched_count))
    self.response.out.write("Deleted %d SeenEntry Records." % (delete_count))


class FeedListHandler(webapp.RequestHandler):
  """Read-only listing of the feeds currently subscribed to."""

  def get(self):
    """Process requests to list feeds to which we are currently subscribed. """
    page_bookmark = urllib.unquote(self.request.get("bookmark"))
    feeds, next_bookmark = WatchedFeed().GetPageToDisplay(
        PAGE_SIZE, page_bookmark)
    context = {
        'feeds': feeds,
        'bookmark': page_bookmark,
        'first_page': self.request.path,
        'next': next_bookmark,
        'admin_user': False,
    }
    template_path = os.path.join(os.path.dirname(__file__), 'pshb_feeds.html')
    self.response.out.write(template.render(template_path, context))


class FeedAdministration(webapp.RequestHandler):
  """Handles administration of feeds and all communications to hub."""
  def get(self, message=""):
    """Process request for Feed Administration Page.

    Args:
      message: Optional status string produced by a preceding POST.
    """
    if not users.is_current_user_admin():
      if users.get_current_user() is None:
        # Not signed in at all: bounce through the login flow and back here.
        self.redirect(users.create_login_url(self.request.uri))
      else:
        self.response.set_status(403)
        self.response.headers['Content-Type'] = "text/plain"
        self.response.out.write("Forbidden. Admin access only.")
      return
    bookmark = urllib.unquote(self.request.get("bookmark"))
    feeds, next = WatchedFeed().GetPageToDisplay(PAGE_SIZE, bookmark)
    template_values = {'feeds': feeds,
                       'bookmark': bookmark,
                       'first_page': self.request.path,
                       'next': next,
                       'admin_user': True,
                       'memcache_stats': memcache.get_stats(),
                       'message': message,
                       }
    path = os.path.join(os.path.dirname(__file__), 'pshb_feeds.html')
    self.response.out.write(template.render(path, template_values))

  def post(self):
    """Process requests to either subscribe to or unsubscribe from a feed """
    result = ""
    if "unsubscribe" in self.request.arguments():
      # There may be multiple feeds from which we need to unsubscribe
      urls = self.request.get_all("unsubscribe")
      for url in urls:
        url = url.strip()
        if url:
          self._Unsubscribe(url)
    if "subscribe" in self.request.arguments():
      # There can only be one new feed to subscribe to.
      url = self.request.get("subscribe").strip()
      if url:
        result = self._Subscribe(url)
    self.get(message=result)

  def _Subscribe(self, feed_url):
    """Subscribes to a feed.

    Args:
      feed_url: URL of the feed to subscribe to.

    Returns:
      An empty string on success, otherwise a human-readable error message.
    """
    # str.strip() replaces the long-deprecated string.strip() module function.
    feed_url = feed_url.strip()
    if not feed_url:
      logging.warning("Can't subscribe without URL.")
      return "Can't subscribe without an URL."
    hub, feed_url, title = self._FetchFeedMetadata(feed_url)
    if not hub:
      return "No hub found in feed."
    if not feed_url:
      return "No feed URL found in feed."
    hub_and_url = "Hub: %s, Feed: %s" % (hub, feed_url)
    if self._SendToHub(hub, "subscribe", feed_url):
      # Expiration is refreshed when the hub verifies the subscription via
      # CallbackHandler; use a clearly-expired placeholder until then.
      dummy_date = datetime.datetime(year=2000, month=1, day=1)
      WatchedFeed.get_or_insert(key_name=feed_url, hub=hub, title=title,
                                expiration=dummy_date, refresh=0)
      logging.info("New Feed Subscription: %s, %s" % (feed_url, hub))
    else:
      logging.warning("Subscribe failed: " + hub_and_url)
      return "Subscription failed."
    return ""

  def _Unsubscribe(self, feed_url):
    """Unsubscribes a feed previously subscribed to"""
    feed_url = feed_url.strip()
    if not feed_url:
      logging.warning("Can't unsubscribe without URL.")
      return
    PSHBCache().DeleteFeed(feed_url)
    try:
      feed = WatchedFeed.get_by_key_name(feed_url)
    except Exception:
      logging.error("Datastore lookup failed for feed: " + feed_url)
      return
    if feed is None:
      # Bug fix: get_by_key_name() returns None for a missing key instead of
      # raising, so the old try/except never fired and feed.hub crashed below.
      logging.error("Can't delete missing feed: " + feed_url)
      return
    hub_and_url = "Hub: %s, Feed: %s" % (feed.hub, feed_url)
    if self._SendToHub(feed.hub, "unsubscribe", feed_url):
      try:
        feed.delete()
      except Exception:
        logging.warning("Unsubscribe failed." + hub_and_url)
      else:
        logging.info("Feed unsubscribed." + hub_and_url)
    else:
      logging.warning("Unsubscribe failed." + hub_and_url)

  def _SendToHub(self, hub, mode, feed_url):
    """Communicates with PubSubHubbub Hub to either subscribe or unsubscribe.

    Args:
      hub: The hub's endpoint URL.
      mode: Either "subscribe" or "unsubscribe".
      feed_url: The topic URL being (un)subscribed.

    Returns:
      True when the hub answered with a 2xx status code.
    """
    # The feed URL is embedded in the callback path so the hub's verification
    # GET identifies which topic it refers to.
    quoted_url = urllib.quote(feed_url)
    form_data = urllib.urlencode({
      "hub.callback": 'http://%s/pshb/callback/%s' % (
                      get_default_version_hostname(), quoted_url),
      "hub.mode": mode,
      "hub.topic": feed_url,
      "hub.verify": "async"
    })
    result = urlfetch.fetch(
      url=hub,
      payload=form_data,
      method=urlfetch.POST,
      headers={'Content-Type': 'application/x-www-form-urlencoded'})
    # Any 2xx status counts as acceptance; verification happens async later.
    if not str(result.status_code)[0:1] == '2':
      debug_message = "Hub %s failed with %s" % (mode, result.status_code)
      logging.info(debug_message)
      return False
    return True

  def _FetchFeedMetadata(self, feed_url):
    """ Given feed url, find its hub, link and title.

    Returns:
      A (hub, feed_url, title) tuple, or (None, None, None) on failure.
    """
    try:
      response = urlfetch.fetch(url=feed_url, method=urlfetch.GET)
    except Exception:
      # Bug fix: this branch used to return a 2-tuple, which made the 3-way
      # unpack in _Subscribe raise ValueError instead of reporting the error.
      logging.warning("Can't find that feed: %s" % (feed_url))
      return None, None, None
    if not (str(response.status_code)[0:1] == '2'):
      logging.warning("Fetch for %s failed. Status Code: %d" % (
                      feed_url, response.status_code))
      return None, None, None
    return ExtractFeedMetadata(feedparser.parse(response.content))


class CallbackHandler(webapp.RequestHandler):
  """Handles the PSHB Content Delivery Callback """
  def get(self):
    """GETs are used for verification only.

    Echoing hub.challenge with a 2xx status confirms to the hub that we
    requested the (un)subscription; a 404 disavows the request.
    """
    hub_mode = self.request.get("hub.mode").lower()
    hub_topic = self.request.get("hub.topic")
    hub_challenge = self.request.get("hub.challenge")
    hub_lease_seconds = self.request.get("hub.lease_seconds", "0")
    if not hub_mode or not hub_topic or not hub_challenge:
      logging.warning("Missing parameters")
      self.response.set_status(404)
      self.response.out.write("Bad request. Missing Parameters")
      return
    # Challenge is written up front; a later 404 status still signals refusal.
    self.response.out.write(hub_challenge)
    if hub_mode == "subscribe":
      if self._VerifySubscribe(hub_topic, hub_lease_seconds):
        self.response.set_status(200)
      else:
        logging.warning("Subscribe rejected for: " + hub_topic)
        self.response.set_status(404)
    elif hub_mode == "unsubscribe":
      if self._VerifyUnsubscribe(hub_topic):
        self.response.set_status(200)
      else:
        logging.warning("Unsubscribe rejected for: " + hub_topic)
        self.response.set_status(404)
    else:
      logging.warning("UnKnown mode: %s for : %s" % (hub_mode, hub_topic))
      self.response.set_status(404)

  def post(self):
    """Process new messages received from PSHB Hubs."""
    # We always respond with success since anything else will cause retries.
    self.response.set_status(204)
    content_type = self.request.headers.get("Content-Type", "")
    if not self._IsSupportedContentType(content_type):
      logging.warning("Unsupported Content Type: " + str(content_type))
      return
    body = self.request.body
    parsed_feed = feedparser.parse(body)
    if parsed_feed.bozo:
      # Malformed payload; drop it silently (204 already set).
      return
    hub, feed_url, feed_title = ExtractFeedMetadata(parsed_feed)
    if not feed_url:
      return
    if not PSHBCache().FeedIsWatched(feed_url):
      logging.warning("Unwatched Feed: %s, Hub: %s" % (feed_url, hub))
      return
    match_count = 0
    stale_count = 0
    already_cached = self._UpdateMemcache(parsed_feed)
    for entry in parsed_feed.entries:
      if not self._IsFreshAndUnseen(entry, already_cached):
        stale_count += 1
        continue
      doc = self._BuildMessageToMatch(feed_url, feed_title, entry)
      if not doc:
        continue
      if self._MatchMessage(doc):
        match_count += 1
        if STORE_MATCHED_DOC_IN_DB:
          doc.put()
    logging.info("Feed: %s Count: %d, Match: %d, Cache: %d, Stale: %d" %
                 (feed_url, len(parsed_feed.entries), match_count,
                  len(already_cached), stale_count))

  def _IsSupportedContentType(self, content_type):
    """Returns True for the Atom/RSS/RDF media types we can parse."""
    # Remove charset data and any other additional data.
    media_type = content_type.split(';')[0].strip().lower()
    return {'application/atom+xml': True,
            'application/rss+xml': True,
            'application/rdf+xml': True,
           }.get(media_type, False)

  def _UpdateMemcache(self, parsed_feed):
    """Returns list of entries already in memcache and adds new if needed"""
    check_list = dict((entry.link, 1) for entry in parsed_feed.entries)
    # add_multi() only sets keys that are absent and returns the keys it did
    # NOT set -- i.e. the entry links that were already cached.
    return memcache.add_multi(check_list, time=MAX_AGE_IN_MEMCACHE)

  def _IsFreshAndUnseen(self, entry, already_cached):
    """Ensures that current entry is good,  unseen and not too old.

    Args:
      entry: A feedparser entry.
      already_cached: Links already present in memcache (from _UpdateMemcache).

    Returns:
      True only for entries with a link and date, at most one day old, that
      have not been seen by this or any other instance.
    """
    if not "link" in entry:
      # If there is no link, we have nothing about which to notify.
      # .get() guards against an entry that also lacks a title.
      logging.info("No Link in %s" % (entry.get("title", "")))
      return False
    if entry.link in already_cached:
      return False
    if not entry.updated_parsed:
      logging.info("Entry with no date: " + entry.link)
      return False
    now = datetime.datetime.utcnow()
    updated = datetime.datetime(*entry.updated_parsed[0:6])
    age = now - updated
    if age.days:
      # Entries older than one day or from the future are rejected
      # (a future timestamp makes timedelta.days negative, hence truthy).
      return False
    # Store item in db if not already there. Cron job cleans these up.
    # Fixed to call the classmethod on the class rather than building a
    # throwaway SeenEntry() instance just to reach get_or_insert.
    entry_in_db = SeenEntry.get_or_insert(entry.link,
                                          first_seen=now,
                                          updated=updated)
    # Only the instance that inserted the record sees first_seen == now.
    return entry_in_db.first_seen == now

  def _MatchMessage(self, doc):
    """Send a message to the Prospective Search Service for matching.
    The matching subscriptions will be returned via TaskQueue."""
    try:
      prospective_search.match(doc, result_batch_size=10)
    except Exception:
      # Narrowed from a bare "except:"; sys.exc_info() supplies the three
      # %s arguments (type, value, traceback).
      logging.info("Sorry... %s, %s, %s" % (sys.exc_info()))
      return False
    else:
      return True

  def _BuildMessageToMatch(self, feed_url, feed_title, entry):
    """Construct the document to be matched and include a snippet which is
    suitable for presentation to the user.

    Returns:
      A populated MessageToMatch, or None when the entry carries neither
      content nor a summary.
    """
    doc = MessageToMatch()
    doc.follow = feed_url
    doc.track = entry.title + "\n"
    if "content" in entry:
      doc.track += entry.content[0].value + "\n"
    elif "summary" in entry:
      doc.track += entry.summary + "\n"
    else:
      return None
    # Strip markup so matching runs against visible text only.
    doc.track = TextExtractor().feed(doc.track)
    doc.size = len(doc.track)
    doc.snippet = '"%s" %s via PSHB' % (entry.title, entry.link)
    return doc

  def _VerifySubscribe(self, url, hub_lease_seconds):
    """Check to ensure that subscribe event is for a watched feed.

    Returns:
      True if |url| is a WatchedFeed; its lease bookkeeping is updated.
    """
    feed = WatchedFeed.get_by_key_name(url)
    if feed:
      feed.refresh = feed.refresh + 1
      # NOTE(review): uses local-time now() while the rest of the file uses
      # utcnow() -- confirm which timezone expiration comparisons expect.
      feed.expiration = datetime.datetime.now() + \
                        datetime.timedelta(seconds=int(hub_lease_seconds))
      feed.verified = datetime.datetime.now()
      feed.put()
      # Ensure that we have a local cached copy of record
      PSHBCache().AddFeed(url, hub=feed.hub, title=feed.title)
      return True
    else:
      return False

  def _VerifyUnsubscribe(self, url):
    """Checks to ensure that unsubscribed feed not in database """
    feed = WatchedFeed.get_by_key_name(url)
    if not feed:
      # Ensure that feed isn't in cache. Unsubscribe request might have been
      # requested by a different instance.
      PSHBCache().DeleteFeed(url)
      return True
    else:
      return False

def main(argv):
  """Wires up the URL routes and runs the WSGI application."""
  routes = [
      ('/pshb/callback/.*', CallbackHandler),
      ('/pshb/admin/cron', CronHandler),
      ('/pshb/admin', FeedAdministration),
      ('/pshb', FeedListHandler),
  ]
  util.run_wsgi_app(webapp.WSGIApplication(routes, debug=True))

if __name__ == '__main__':
  main(sys.argv)
