#!/usr/bin/python
# Copyright(c) 2009 Andrew Chatham and Vijay Pandurangan

import mobi
import md5
import cgi
import logging
import os
import sys
import time
import feedparser
import datetime
import urllib

from google.appengine.api import users
from google.appengine.api.urlfetch import fetch
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app

HTTP_OK = 200
HTTP_FORBIDDEN = 403
# Kill-switch for the per-user ownership check in DeleteUrl; currently off.
ENABLE_SECURITY = False

# Maps problem-type keys (stored in ProblemReport.problem_type) to the
# human-readable labels shown in the problem-report dropdown.
PROBLEM_TYPES = {
  'unconvertable' : 'Download to Kindle failed',
  'unopenable' : 'Download ok, but Kindle could not open the file',
}
# Public base URL used to build the bookmarklets and download links.
BASE_URL = 'http://www.bibliorize.com'
#BASE_URL = 'http://localhost:8080'

# Javascript bookmarklet: serializes the current page's DOM and POSTs it
# to /add_content as a static snapshot.
BOOKMARKLET = '''<a href="javascript:
s=new XMLSerializer();
d=document;
txt=s.serializeToString(d);
f=d.createElement('form');
f.method='post';
f.action='%s/add_content?bv=1';
i0=d.createElement('textarea');
i0.name='content';
i0.value=txt;
f.appendChild(i0);
i1=d.createElement('input');
i1.name='url';
i1.value=document.location.href;
f.appendChild(i1);
d.body.appendChild(f);
f.submit();">Bibliorize</a>''' % BASE_URL

# Javascript bookmarklet: injects subscribe.js into an HTML page, or falls
# back to /add_feed?url=... when the page itself looks like a feed (XML).
FEED_BOOKMARKLET = '''<a href="javascript:
 d=document;
 b=d.body;

 if(b&&!document.xmlVersion){
   void(myscript=d.createElement('script'));
   void(myscript.src='%s/static/subscribe.js');
   void(b.appendChild(myscript));
   }
 else {
   location='%s/add_feed?url='+encodeURIComponent(location.href)
   }">
Add Bibliorize Feed</a>''' %(BASE_URL,BASE_URL)


# Per-user cap on reading-list entries.
MAX_ENTRIES = 20
# 500 KiB cap on any single fetched or posted document.
MAX_CONTENT_SIZE = 500<<10

def MakeBookId(email):
  """Return the user's 10-hex-char book id: md5(SALT + email)[:10].

  SALT is a module global loaded from the 'SALT' file in main(); keeping
  it secret prevents guessing a book URL from an email address.
  """
  # The 'md5' module is deprecated (removed in py3); hashlib.md5 produces
  # byte-identical digests, so existing book ids are unchanged.
  import hashlib
  return hashlib.md5(SALT + email).hexdigest()[:10]

# A user-filed report that a generated book failed, snapshotting the
# user's reading list (urls/sizes/fetched flags) at report time so the
# failure can be debugged later.
class ProblemReport(db.Model):
  reported_by = db.UserProperty(required=True)
  # Parallel lists: element i of each describes the same reading-list entry.
  urls = db.StringListProperty()
  sizes = db.ListProperty(int)
  fetched = db.ListProperty(bool)
  date = db.DateTimeProperty(auto_now_add=True)
  # Constrained to the keys of PROBLEM_TYPES (see top of file).
  problem_type = db.StringProperty(required=True,
                                   choices=set(PROBLEM_TYPES.keys()))
class ReadingList(db.Model):
  """One entry in a user's reading list, plus its cached content."""
  owner = db.UserProperty(required=True)
  # 10-hex-char id shared by all of one user's entries; it names the
  # public /dbook.<bookid> download URL.
  bookid = db.StringProperty(default='')
  url = db.StringProperty(required=True)
  # Cached document body (empty for feed-type entries, which are fetched
  # at book-build time).
  content = db.BlobProperty(default='')
  date = db.DateTimeProperty(auto_now_add=True)
  size = db.IntegerProperty(required=True)
  include_in_list = db.BooleanProperty(default=True)

  # True once an initial crawl has stored content for this entry.
  fetched = db.BooleanProperty()
  # TODO: use a string property with a limited set of strings
  # uri_type selects the handler class via RL_CLASS_MAP.
  uri_type = db.IntegerProperty(default=0)
  URI_CRAWL_ONCE = 0
  URI_STATIC_NOCRAWL = 1
  URI_CRAWL_ALWAYS = 2
  URI_FEED = 3
  URI_GOOGLE_PROXIED_FEED = 4
  URI_FEED_CRAWLER = 5
  # BUG FIX: a trailing comma previously made this the tuple (6,), so a
  # stored uri_type of 6 could never match its RL_CLASS_MAP key.
  URI_GOOGLE_PROXIED_FEED_CRAWLER = 6
  
class FetchError(Exception):
  """Base class for errors raised while fetching reading-list content."""
  pass
class BadURLFetchError(FetchError):
  """The URL could not be fetched, or returned a non-200 status."""
  pass
class ContentsTooLargeFetchError(FetchError):
  """The document exceeded MAX_CONTENT_SIZE; raised with the byte count."""
  pass

class BaseRLHandler(object):
  """unimplemented generic handler for Reading Lists"""
  def __init__(self, readinglist):
    self._rl = readinglist

  def put(self):
    ''' This method updates the database with the encapsulated reading list '''
    
    return self._rl.put()

  def _UrlMangler(self, url):
    '''useful for proxies and for unittests'''
    return url

  def _Fetch(self, url_override=None):
    ''' fetches raw contents from url and returns it. no checking is
    performed'''
    try:
      if not url_override:
        f = fetch(self._UrlMangler(self._rl.url))
      else:
        f = fetch(url_override)
    #TODO catch a better exception when i have access to api guide
    except Exception, e:
      raise BadURLFetchError, e
    
    if f.status_code != HTTP_OK:
      raise BadURLFetchError, f
    return self._CheckContent(f.content)
    
  def _CheckContent(self, contents):
    ''' check to see if the contents meet requirements '''
    if len(contents) > MAX_CONTENT_SIZE:
      raise ContentsTooLargeFetchError, len(contents)
    if contents.find('</blockquote ') >= 0:
      logging.info("HAD BAD BLOCKQUOTE")
    return contents
  
  def InitialCrawl(self, _):
    assert False, 'not implemented'
    
  def GetContentsAsList(self, after_timestamp=-1):
    """ return a list of HTML bodies """
    assert False, 'not implemented'

def MakeTSFromRl(dateobj):
  """Convert a datetime to a float Unix timestamp.

  NOTE(review): utctimetuple() drops tzinfo but time.mktime() interprets
  the tuple in the server's *local* timezone — see the timezone TODOs
  elsewhere in this file.
  """
  # Was datetime.datetime.utctimetuple(dateobj): an unbound-method call;
  # the ordinary instance-method call is equivalent and idiomatic.
  return float(time.mktime(dateobj.utctimetuple()))
          
  
class BaseCachedRLHandler(BaseRLHandler):
  """Base for handlers that serve content cached on the entity itself."""

  def GetContentsAsList(self, after_timestamp=-1):
    """Return the cached body, or [] when the entry predates the timestamp.

    A non-persistent entry added before after_timestamp was already
    delivered in an earlier book, so it is elided from this one.
    """
    if self._IsStale(after_timestamp):
      return []
    return [self._rl.content]

  def _SetContent(self, content):
    """Store content on the entity and record its byte size."""
    self._rl.content = content
    self._rl.size = len(content)

  def _IsStale(self, timestamp):
    """True when this entry's add date is older than timestamp."""
    #todo(vijayp): yuck. this can't be the best way to do this
    #TODO: what's the deal with timezones in megastore?
    stale = MakeTSFromRl(self._rl.date) < float(timestamp)
    if stale:
      logging.info('%s is stale!! %s versus %s.' % (self._rl.url,
                                                    self._rl.date,
                                                    timestamp))
    return stale
  
class CrawlOnceRLHandler(BaseCachedRLHandler):
  """Fetches the URL exactly once, at add time, then serves the cache."""

  def InitialCrawl(self, _):
    assert not self._rl.fetched
    # Fetch now, validate, and cache the body on the entity.
    body = self._Fetch()
    self._SetContent(self._CheckContent(body))
    self._rl.fetched = True
    
class StaticRLHandler(BaseCachedRLHandler):
  """Caches user-supplied static content; nothing is ever fetched."""

  def InitialCrawl(self, content):
    checked = self._CheckContent(content)
    self._SetContent(checked)

class CrawlAlwaysRLHandler(CrawlOnceRLHandler):
  """Re-fetches the URL on every book build (the cache is never served)."""

  def GetContentsAsList(self, after_timestamp=-1):
    body = self._Fetch()
    return [self._CheckContent(body)]
  
class FeedRLHandler(BaseRLHandler):
  """Handles RSS/Atom feeds: each sufficiently-new feed item becomes one
  HTML document in the generated book."""

  def InitialCrawl(self, _):
    # Feeds cache nothing at add time; items are fetched per book build.
    assert not self._rl.fetched
    # ensure feed is crawlable
    #self.GetContentsAsList()

  def _HandleEntry(self, entry):
    """Render one feedparser entry as a minimal standalone HTML page."""
    val = ''
    try:
      val = entry.content[0].value
    except AttributeError, e:
      # No inline content; fall back to the entry summary.
      try:
        val = entry.summary_detail['value']
      except AttributeError,e:
        logging.info('giving up, %s. %s' % (e, entry))
        val = 'ERROR - COULD NOT EXTRACT CONTENT. PLEASE FILE A BUG.'

    # NOTE(review): title/updated/body are interpolated without escaping;
    # feed-supplied markup flows straight into the generated book.
    minidoc = '''<html>
    <title>%s (on %s)</title>
    <body>%s</body></html>''' % (entry.title,
                                 entry.updated,
                                 val)


    # this is possibly the ugliest thing ever
    # TODO(chatham): can you fix this stuff? I assume  you've figured out
    # the unicode mess.
    return minidoc

  def GetContentsAsList(self, after_timestamp=-1):
    """Fetch the feed now and return HTML for entries updated since
    after_timestamp — or the whole feed if this entry itself was added
    after the last book was built."""
    # fetch now
    content = self._CheckContent(self._Fetch())
    feed = feedparser.parse(content)
    after_timestamp = float(after_timestamp)
    rval = []
    for entry in feed.entries:
      # if the feed was added after the last book refresh, simply
      # refresh the entire feed
      add_timestamp = MakeTSFromRl(self._rl.date)
      logging.debug('addstamp  = %s (%s)', add_timestamp,
                   time.ctime(add_timestamp))
      logging.debug('after     = %s (%s)', after_timestamp,
                   time.ctime(after_timestamp))
      if add_timestamp > after_timestamp:
        this_ts = 0
      else:
        this_ts = after_timestamp

      logging.info(' %s vs %s' ,  time.ctime(time.mktime(entry.updated_parsed)),
                   time.ctime(float(this_ts)))
      if  time.mktime(entry.updated_parsed) >= float(this_ts):
        rval.append(self._HandleEntry(entry))
        logging.info('publishing %s because it was not stale' % entry.title)
      else:
        logging.info('elided %s because it was stale', entry.title)
    return rval

class FeedCrawlingRLHandler(FeedRLHandler):
  """Like FeedRLHandler, but downloads each entry's linked page rather
  than using the feed-supplied body."""

  def _HandleEntry(self, entry):
    #unicode crap?
    # BUG FIX: feedparser entries expose their URL as .link, not .url;
    # entry.url raised AttributeError for every crawled feed entry.
    return self._Fetch(entry.link)
    
class GoogleProxiedFeedRLHandler(FeedRLHandler):
  """Reads the feed through Google Reader's public Atom proxy."""

  def _UrlMangler(self, url):
    quoted = urllib.quote(url)
    return 'http://www.google.com/reader/public/atom/feed/%s' % quoted

class GoogleProxiedFeedCrawlingRLHandler(GoogleProxiedFeedRLHandler):
  """Proxied feed whose entries are each crawled individually (compare
  FeedCrawlingRLHandler)."""

  def _HandleEntry(self, entry):
    # BUG FIX: feedparser entries carry their URL in .link, not .url.
    return self._Fetch(entry.link)

# Dispatch table: ReadingList.uri_type value -> handler class.
# Keep in sync with the URI_* constants on ReadingList.
RL_CLASS_MAP = {
  ReadingList.URI_STATIC_NOCRAWL      : StaticRLHandler,
  ReadingList.URI_CRAWL_ONCE          : CrawlOnceRLHandler,
  ReadingList.URI_CRAWL_ALWAYS        : CrawlAlwaysRLHandler,
  ReadingList.URI_FEED                : FeedRLHandler,
  ReadingList.URI_FEED_CRAWLER        : FeedCrawlingRLHandler,
  ReadingList.URI_GOOGLE_PROXIED_FEED : GoogleProxiedFeedRLHandler,
  ReadingList.URI_GOOGLE_PROXIED_FEED_CRAWLER :
     GoogleProxiedFeedCrawlingRLHandler,

}

def RLHandlerFactory(rl_element):
  """Return a handler instance matching rl_element.uri_type.

  Raises KeyError for an unmapped uri_type.
  """
  handler_class = RL_CLASS_MAP[rl_element.uri_type]
  return handler_class(rl_element)


###### http handlers:

class _Handler(webapp.RequestHandler):
  """Shared base for all pages: template rendering and common values."""

  def _RenderTemplate(self, file, values):
    """Render the named template (relative to this file) into the response."""
    template_path = os.path.join(os.path.dirname(__file__), file)
    self.response.out.write(template.render(template_path, values))

  def _TemplateValues(self):
    """Build the dict of values every template expects: login/logout info
    for the current user plus the two bookmarklets."""
    values = {
      'bookmarklet': BOOKMARKLET,
      'feed_bookmarklet': FEED_BOOKMARKLET,
    }
    user = users.get_current_user()
    if user:
      values['user'] = user
      values['logout_url'] = users.create_logout_url('/')
    else:
      values['login_url'] = users.create_login_url(self.request.uri)
    return values

  def _GenericError(self, message):
    """Render the generic error page carrying the given message."""
    values = self._TemplateValues()
    values['error_message'] = message
    self._RenderTemplate('generic_error.html', values)

# TODO(chatham): This is probably a giant security hole:
# we show mostly-unsanitized text to users on the same
# domain as we use for everything else.

class ViewContent(_Handler):
  """Shows the stored raw content of one reading-list entry (?key=...)."""

  def get(self):
    user = users.get_current_user()
    if not user:
      self.redirect(users.create_login_url(self.request.uri))
      return
    # The datastore key comes straight from the query string.
    entry = db.get(self.request.get('key'))
    if not entry:
      self._GenericError('Could not find entry')
      return
    if entry.owner != user:
      print >>self.response.out, 'Wrong user'
      self.error(HTTP_FORBIDDEN)
    else:
      # NOTE(review): raw, unsanitized content is echoed to the browser —
      # see the security TODO above this class.
      print >>self.response.out, entry.content

class MainPage(_Handler):
  """Renders the home page."""

  def get(self):
    self.response.headers['Content-Type'] = 'text/html'
    self._RenderTemplate('home.html', self._TemplateValues())

class ReadingListPage(_Handler):
  """Shows the user's reading list plus their personal download URL."""

  def get(self):
    self.response.headers['Content-Type'] = 'text/html'

    values = self._TemplateValues()
    user = users.get_current_user()
    if not user:
      self.redirect(users.create_login_url(self.request.uri))
      return
    reading_list = ReadingList.gql("WHERE owner=:1 ORDER BY date DESC",
                                   user).fetch(MAX_ENTRIES)
    # All of a user's entries share one bookid; pick it up from any entry.
    bookid = None
    for entry in reading_list:
      if not entry.bookid:
        continue
      assert bookid is None or bookid == entry.bookid
      bookid = entry.bookid
    if bookid is None:
      # No entries carry an id yet: derive the canonical one.
      bookid = MakeBookId(user.email())

    values['reading_list'] = reading_list
    values['problem_types'] = PROBLEM_TYPES
    values['dlurl'] = '%s/dbook.%s' % (BASE_URL, bookid)
    self._RenderTemplate('reading_list.html', values)

class ReportProblem(_Handler):
  """Stores a ProblemReport snapshotting the user's current list state."""

  def get(self):
    user = users.get_current_user()
    if not user:
      self.redirect(users.create_login_url(self.request.uri))
      return
    problem_type = self.request.get('problemtype')
    if problem_type not in PROBLEM_TYPES:
      self._GenericError('Please select a problem type from the dropdown')
      return
    reading_list = ReadingList.gql('WHERE owner=:1 ORDER BY date DESC',
                                   user).fetch(MAX_ENTRIES)
    # Snapshot the list alongside the report so failures can be debugged.
    ProblemReport(reported_by=user, problem_type=problem_type,
                  urls=[entry.url for entry in reading_list],
                  sizes=[entry.size for entry in reading_list],
                  fetched=[entry.fetched for entry in reading_list]).put()
    self._RenderTemplate('report_problem.html', self._TemplateValues())

class DeleteUrl(_Handler):
  """Deletes one reading-list entry (?key=...) then returns to the list."""

  def get(self):
    user = users.get_current_user()

    if not user:
      self.redirect(users.create_login_url(self.request.uri))
      return
    entry = db.get(self.request.get('key'))
    if not entry:
      self._GenericError('Could not find entry')
      return
    elif entry.owner != user and ENABLE_SECURITY:
      # BUG FIX: this was 'logginge.error', a NameError that would have
      # masked the real problem whenever the ownership check fired
      # (currently never, since ENABLE_SECURITY is False).
      logging.error('Trying to delete unonwned entry: %s', entry)
      self.error(500)
    else:
      entry.delete()
    self.redirect('/reading_list')

class _AddHandler(_Handler):
  def _AlreadyExists(self, user, url):
    '''If the URL has already been stored by this user, redirect to an
    error and return True.'''
    existing = ReadingList.gql("WHERE owner=:1 and url=:2", user, url).fetch(1)
    if existing:
      self._GenericError('Your list already has an entry with that URL')
      return True
    return False

  def _TooManyEntries(self, user):
    '''If the user already has too many entries in their list, give an
    error and return True'''
    existing = ReadingList.gql("WHERE owner=:1", user).fetch(MAX_ENTRIES)
    if len(existing) >= MAX_ENTRIES:
      values = self._TemplateValues()
      values['max_entries'] = MAX_ENTRIES
      self._RenderTemplate('too_many_entries.html', values)
      return True
    return False

  def _AddContent(self, user, uri, uri_type, content_override=None):
    """
    Add content. supply content_override as a string if you want to use
    that string instead of fetching from the URL.
    """
    
    if self._TooManyEntries(user):
      return

    # for now bookid is a 10-hex hash.
    bookid = MakeBookId(user.email())
    rl = ReadingList(owner=user, url=uri, content='',
                     size=0, fetched=False, uri_type=uri_type,
                     bookid = bookid)
    rlh = RLHandlerFactory(rl)
    try:
      # TODO(vijayp): this abstraction is slightly broken
      rlh.InitialCrawl(content_override)
    except BadURLFetchError, e:
      #TODO make nice
      self._GenericError("url is invalid. Error %s" % e)
      return
    
    except ContentsTooLargeFetchError, size:
      values = self._TemplateValues()
      values['size'] = size
      values['maxsize'] = MAX_CONTENT_SIZE
      values['url'] = url
      self._RenderTemplate('too_big.html', values)
      return
    
    rlh.put()
    logging.info('Added content')
    self.redirect('/reading_list')
  
class AddUrl(_AddHandler):
  """GET /add_url?url=... — add a crawl-once entry for the current user."""

  def _AddContentWrapper(self, user, url):
    return self._AddContent(user, url, ReadingList.URI_CRAWL_ONCE)

  def get(self):
    user = users.get_current_user()
    if not user:
      self.redirect(users.create_login_url(self.request.uri))
      return

    url = self.request.get('url')
    if not url:
      self._GenericError('Missing url parameter')
      return
    if self._AlreadyExists(user, url):
      return
    self._AddContentWrapper(user, url)
    
class AddFeedUrl(AddUrl):
  """GET /add_feed?url=... — add a feed entry (fetched directly)."""

  def _AddContentWrapper(self, user, url):
    # Google-Reader proxying (URI_GOOGLE_PROXIED_FEED) is currently disabled.
    return self._AddContent(user, url, ReadingList.URI_FEED)

class AddContent(AddUrl):
  """POST /add_content — store a static page snapshot from the bookmarklet."""

  def post(self):
    # The bookmarklet POSTs its form; reuse the GET validation flow.
    return self.get()

  def _AddContentWrapper(self, user, url):
    content = self.request.get('content').encode('utf-8')
    if not content:
      self._GenericError('Missing content parameter')
      return
    self._AddContent(user, url, ReadingList.URI_STATIC_NOCRAWL, content)


class BookPage(_Handler):
  def _GetFileName(self):
    # TODO: caching
    start_time = 1233518324
    delta = int(time.time()) - start_time
    return 'Bibliorize-%d.mobi' % delta

  def _GetReadingList(self):
    user = users.get_current_user()
    if not user:
      self.redirect(users.create_login_url(self.request.uri))
      return
    reading_list = ReadingList.gql("WHERE owner=:1 ORDER BY date DESC",
                                   user).fetch(MAX_ENTRIES)
    return reading_list

  def get(self):
    values = self._TemplateValues()
    reading_list = self._GetReadingList()
    filename = self._GetFileName()
    # TODO(vijayp): ensure that we don't exceed maximum timeout while
    # fetching content.
    content_lst = []
    
    after_timestamp = self.request.get('timestamp')
    if not after_timestamp:
      after_timestamp = -1
    bookids = {}
    for toread in reading_list:
      bookids[toread.bookid] = 1
      # TODO(vijayp): feed in current time from request to this method
      # so that it can construct a sublist if necessary.
      rlh = RLHandlerFactory(toread)
      try:
        content_lst += rlh.GetContentsAsList(after_timestamp)
      except BadURLFetchError, e:
        #Print error to the content list
        logging.warning('could not fetch url %s with error %s', toread.url, e)

    bookid = ''
    if len(bookids) >= 1:
      bookid=bookids.keys()[0]
    refreshurl='%s/dbook?timestamp=%lf&bookid=%s' %(
      BASE_URL, time.time(), bookid)  
    c = mobi.Converter(refreshurl)    
    if self.request.get('html'):
      self.response.out.write(c.MakeOneHTML(content_lst))
      return

    # Create the .mobi file now, using contents that we fetched above.
    # TODO: do we handle an empty list gracefully?
    self.response.headers['Content-Disposition'] = (
      'attachment; filename="%s"' % filename)
    self.response.headers['Content-Type'] = 'application/x-mobipocket-ebook'
    self.response.out.write(c.ConvertStrings(content_lst))

class DirectBookPage(BookPage):
  """Serves a book by bookid alone (no login), e.g. /dbook.<bookid>."""

  def _GetReadingList(self):
    bookid = self.request.get('bookid')
    if not bookid:
      #TODO make this nice
      # Fall back to parsing '/dbook.<bookid>' out of the request URL.
      bookid = self.request.url.split('.')[-1].split('?')[0]
    return ReadingList.gql("WHERE bookid=:1 ORDER BY date DESC",
                           bookid).fetch(MAX_ENTRIES)
    
class TemplatedPage(_AddHandler):
  """Serves the static templated pages under /help/."""

  def get(self, arg):
    values = self._TemplateValues()
    # Only the help index is mapped; any other path renders nothing.
    if arg == '/help/index.html':
      self._RenderTemplate('help.html', values)

# URL routing table.  '/book.*' and '/dbook.*' are prefix patterns so the
# bookid can ride in the path itself (e.g. /dbook.<bookid>).
application = webapp.WSGIApplication(
                                     [('/', MainPage),
                                      ('/reading_list', ReadingListPage),
                                      ('/book.*', BookPage),
                                      ('/dbook.*', DirectBookPage),
                                      ('/add_url', AddUrl),
                                      ('/add_feed', AddFeedUrl),
                                      ('/add_content', AddContent),
                                      ('/delete', DeleteUrl),
                                      ('/view', ViewContent),
                                      ('/problem', ReportProblem),
                                      ('(/help/.*)', TemplatedPage),
                                      ])

def main():
  """Load the book-id salt and start serving requests."""
  # IMPORTANT: never ever check in the data in the salt file.
  # If people get a hold of it, they will be able to guess the book url
  # based on an email address!
  global SALT
  # NOTE: readlines()[0] keeps the first line's trailing newline; existing
  # book ids depend on that, so do not "fix" it by stripping.
  salt_file = open('SALT', 'r')
  try:
    SALT = salt_file.readlines()[0]
  finally:
    salt_file.close()  # BUG FIX: the file handle was previously leaked
  run_wsgi_app(application)

if __name__ == "__main__":
  main()
