#!/usr/bin/env python
# Copyright 2008 Brett Slatkin
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__author__ = "Brett Slatkin (bslatkin@gmail.com)"


try:
  from BeautifulSoup import BeautifulSoup, Tag
except ImportError:
  # Only catch ImportError: the previous bare "except" would also have hidden
  # unrelated failures raised while importing BeautifulSoup.
  print("BeautifulSoup could not be imported!")

# try:
#   from mendeley_client import MendeleyClient
# except:
#   print "Mendeley Client could not be imported!"
 

"""
Mendeley Open API Example Client

Copyright (c) 2010, Mendeley Ltd. <copyright@mendeley.com>

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

For details of the Mendeley Open API see http://dev.mendeley.com/

Example usage:

>>> from pprint import pprint
>>> from mendeley_client import MendeleyClient
>>> mendeley = MendeleyClient('<consumer_key>', '<secret_key>')
>>> try:
>>> 	mendeley.load_keys()
>>> except IOError:
>>> 	mendeley.get_required_keys()
>>> 	mendeley.save_keys()
>>> results = mendeley.search('science')
>>> pprint(results['documents'][0])
{u'authors': None,
 u'doi': None,
 u'id': u'8c18bd50-6f07-11df-b8f0-001e688e2dcb',
 u'mendeley_url': u'http://localhost/research//',
 u'publication_outlet': None,
 u'title': None,
 u'year': None}
>>> documents = mendeley.library()
>>> pprint(documents)
{u'current_page': 0,
 u'document_ids': [u'86175', u'86176', u'86174', u'86177'],
 u'items_per_page': 20,
 u'total_pages': 1,
 u'total_results': 4}
>>> details = mendeley.document_details(documents['document_ids'][0])
>>> pprint(details)
{u'authors': [u'Ben Dowling'],
 u'discipline': {u'discipline': u'Computer and Information Science',
                 u'subdiscipline': None},
 u'tags': ['nosql'],
 u'title': u'NoSQL(EU) Write Up',
 u'year': 2010}
"""
from pprint import pprint
import oauth2 as oauth
import pickle
import httplib
import simplejson
import urllib

class OAuthClient(object):
    """General purpose OAuth 1.0a client (HTTP transport for the Mendeley API).

    Builds signed requests with the `oauth2` library and sends them over a
    plain httplib connection; one method per HTTP verb plus the
    request-token / authorize / access-token dance.
    """

    def __init__(self, consumer_key, consumer_secret, options=None):
        """Set up endpoint URLs and the OAuth consumer.

        Args:
            consumer_key: application consumer key.
            consumer_secret: application consumer secret.
            options: optional dict overriding 'host', 'port',
                'access_token_url', 'request_token_url', 'authorize_url'.
        """
        if options is None:
            # Fresh dict per call; a mutable default argument would be shared
            # across every OAuthClient instance.
            options = {}
        # Set values based on provided options, or revert to defaults
        self.host = options.get('host', 'api.mendeley.com')
        self.port = options.get('port', 80)
        self.access_token_url = options.get('access_token_url', '/oauth/access_token/')
        # BUG FIX: both lookups below previously read the 'access_token_url'
        # key (copy/paste error), so custom request-token / authorize
        # endpoints passed in `options` were silently ignored.
        self.request_token_url = options.get('request_token_url', '/oauth/request_token/')
        self.authorize_url = options.get('authorize_url', '/oauth/authorize/')

        # Omit the default port from the authority string.
        if self.port == 80: self.authority = self.host
        else: self.authority = "%s:%d" % (self.host, self.port)

        self.consumer = oauth.Consumer(consumer_key, consumer_secret)

    def get(self, path, token=None):
        """Signed GET; returns an httplib response object."""
        url = "http://%s%s" % (self.host, path)
        request = oauth.Request.from_consumer_and_token(
            self.consumer,
            token,
            http_method='GET',
            http_url=url,
        )
        return self._send_request(request, token)

    def post(self, path, post_params, token=None):
        """Signed POST of form-encoded `post_params`."""
        url = "http://%s%s" % (self.host, path)
        request = oauth.Request.from_consumer_and_token(
            self.consumer,
            token,
            http_method='POST',
            http_url=url,
            parameters=post_params
        )
        return self._send_request(request, token)

    def delete(self, path, token=None):
        """Signed DELETE."""
        url = "http://%s%s" % (self.host, path)
        request = oauth.Request.from_consumer_and_token(
            self.consumer,
            token,
            http_method='DELETE',
            http_url=url,
        )
        return self._send_request(request, token)

    def put(self, path, token=None, body=None, body_hash=None, headers=None):
        """Signed PUT of a raw `body`; `body_hash` becomes oauth_body_hash."""
        url = "http://%s%s" % (self.host, path)
        request = oauth.Request.from_consumer_and_token(
            self.consumer,
            token,
            http_method='PUT',
            http_url=url,
            parameters={'oauth_body_hash': body_hash}
        )
        return self._send_request(request, token, body, headers)

    def request_token(self):
        """Fetch an unauthorized request token from the server."""
        response = self.get(self.request_token_url).read()
        return oauth.Token.from_string(response)

    def authorize(self, token, callback_url="oob"):
        """Return the URL the user must visit to authorize `token`."""
        # (Previously the URL expression was duplicated; build it once.)
        http_url = 'http://%s%s' % (self.authority, self.authorize_url)
        request = oauth.Request.from_token_and_callback(
            token=token, callback=callback_url, http_url=http_url)
        return request.to_url()

    def access_token(self, request_token):
        """Exchange an authorized request token for an access token."""
        response = self.get(self.access_token_url, request_token).read()
        return oauth.Token.from_string(response)

    def _send_request(self, request, token=None, body=None, extra_headers=None):
        """Sign `request` and dispatch it with the matching HTTP verb."""
        request.sign_request(oauth.SignatureMethod_HMAC_SHA1(), self.consumer, token)
        conn = self._get_conn()
        if request.method == 'POST':
            # OAuth parameters travel in the form-encoded body for POSTs.
            conn.request('POST', request.url, body=request.to_postdata(), headers={"Content-type": "application/x-www-form-urlencoded"})
        elif request.method == 'PUT':
            # Merge caller-supplied headers (e.g. Content-disposition) with
            # the OAuth Authorization header.
            final_headers = request.to_header()
            if extra_headers is not None:
                final_headers.update(extra_headers)
            conn.request('PUT', request.url, body, headers=final_headers)
        elif request.method == 'DELETE':
            conn.request('DELETE', request.url, headers=request.to_header())
        else:
            conn.request('GET', request.url, headers=request.to_header())
        return conn.getresponse()

    def _get_conn(self):
        """Open a fresh HTTP connection to the API host."""
        return httplib.HTTPConnection("%s:%d" % (self.host, self.port))

class MendeleyRemoteMethod(object):
    """Call a Mendeley OpenAPI method and parse and handle the response."""

    def __init__(self, details, callback):
        self.details = details    # Argument, URL and additional details.
        self.callback = callback  # Callback to actually do the remote call.

    def __call__(self, *args, **kwargs):
        """Invoke the API method.

        Positional args fill the 'required' URL placeholders (in order);
        'optional' parameters must be supplied as keyword args.

        Returns:
            The decoded JSON response body, or None for a 204 response.
        Raises:
            ValueError: a required argument is missing.
            Exception: the server returned a 500 (body is the message).
        """
        url = self.details['url']
        # Interpolate the required arguments into the URL template.
        required = self.details.get('required')
        if required:
            required_args = dict(zip(required, args))
            if len(required_args) < len(required):
                raise ValueError('Missing required args')

            for (key, value) in required_args.items():
                # URL-encode each path component.
                required_args[key] = urllib.quote_plus(str(value))

            url = url % required_args

        # Optional arguments must be provided as keyword args
        optional_args = {}
        for optional in self.details.get('optional', []):
            # 'in' instead of the removed/deprecated dict.has_key().
            if optional in kwargs:
                optional_args[optional] = kwargs[optional]

        # Do the callback - will return a HTTPResponse object
        response = self.callback(url, self.details.get('access_token_required', False),
                                 self.details.get('method', 'get'), optional_args)
        status = response.status
        body = response.read()
        if status == 500:
            raise Exception(body)
        if status != 204:
            # 204 No Content has no body to decode.
            return simplejson.loads(body)

class MendeleyClient(object):
    """Thin client for the Mendeley OpenAPI.

    __init__ attaches one MendeleyRemoteMethod per entry in `methods`, so
    e.g. `client.search('query')` builds the URL from its definition and
    performs the HTTP call through `api_request`.
    """

    # API method definitions. Used to create MendeleyRemoteMethod instances
    methods = {
        ######## Public Resources ########
        'details': {
            'required': ['id'],
            'optional': ['type'],
            'url': '/oapi/documents/details/%(id)s/',
        },
        'categories': {
            'url': '/oapi/documents/categories/',
        },
        'subcategories': {
            'url': '/oapi/documents/subcategories/%(id)s/',
            'required': ['id'],
        },
        'search': {
            'url': '/oapi/documents/search/%(query)s/',
            'required': ['query'],
            'optional': ['page', 'items'],
        },
        'tagged': {
            'url': '/oapi/documents/tagged/%(tag)s/',
            'required': ['tag'],
            'optional': ['cat', 'subcat', 'page', 'items'],
        },
        'related': {
            'url': '/oapi/documents/related/%(id)s/',
            'required': ['id'],
            'optional': ['page', 'items'],
        },
        'authored': {
            'url': '/oapi/documents/authored/%(author)s/',
            'required': ['author'],
            'optional': ['page', 'items'],
        },
        'public_groups': {
            'url': '/oapi/documents/groups/',
            'optional': ['page', 'items', 'cat']
        },
        'public_group_details': {
            'url': '/oapi/documents/groups/%(id)s/',
            'required': ['id'],
        },
        'public_group_docs': {
            'url': '/oapi/documents/groups/%(id)s/docs/',
            'required': ['id'],
            'optional': ['details', 'page', 'items'],
        },
        'public_group_people': {
            'url': '/oapi/documents/groups/%(id)s/people/',
            'required': ['id'],
        },
        'author_stats': {
            'url': '/oapi/stats/authors/',
            'optional': ['discipline', 'upandcoming'],
        },
        'paper_stats': {
            'url': '/oapi/stats/papers/',
            'optional': ['discipline', 'upandcoming'],
        },
        'publication_stats': {
            'url': '/oapi/stats/publications/',
            'optional': ['discipline', 'upandcoming'],
        },
        'tag_stats': {
            'url': '/oapi/stats/tags/%(discipline)s/',
            'required': ['discipline'],
            'optional': ['upandcoming'],
        },
        ######## User Specific Resources ########
        'library_author_stats': {
            'url': '/oapi/library/authors/',
            'access_token_required': True,
        },
        'library_tag_stats': {
            'url': '/oapi/library/tags/',
            'access_token_required': True,
        },
        'library_publication_stats': {
            'url': '/oapi/library/publications/',
            'access_token_required': True,
        },
        'library': {
            'url': '/oapi/library/',
            'optional': ['page', 'items'],
            'access_token_required': True,
        },
        'create_document': {
            'url': '/oapi/library/documents/',
            # HACK: 'document' is required, but by making it optional here it'll get POSTed
            # Unfortunately that means it needs to be a named param when calling this method
            'optional': ['document'],
            'access_token_required': True,
            'method': 'post',
        },
        'upload_pdf': {
            'url': '/oapi/library/documents/%(id)s/',
            'required': ['id'],
            'optional': ['data', 'file_name', 'oauth_body_hash', 'sha1_hash'],
            'access_token_required': True,
            'method': 'put'
        },
        'document_details': {
            'url': '/oapi/library/documents/%(id)s/',
            'required': ['id'],
            'access_token_required': True,
        },
        'documents_authored': {
            'url': '/oapi/library/documents/authored/',
            'access_token_required': True,
        },
        'delete_library_document': {
            'url': '/oapi/library/documents/%(id)s/',
            'required': ['id'],
            'access_token_required': True,
            'method': 'delete',
        },
        'contacts': {
            'url': '/oapi/profiles/contacts/',
            'access_token_required': True,
            'method': 'get',
        },
        'contacts_of_contact': {
            'url': '/oapi/profiles/contacts/%(id)s/',
            'required': ['id'],
            'access_token_required': True,
            'method': 'get',
        },
        'add_contact': {
            'url': '/oapi/profiles/contacts/%(id)s/',
            'required': ['id'],
            'access_token_required': True,
            'method': 'post',
        },
        # Folders methods #
        'folders': {
            'url': '/oapi/library/folders/',
            'access_token_required': True,
        },
        'folder_documents': {
            'url': '/oapi/library/folders/%(id)s/',
            'required': ['id'],
            'optional': ['page', 'items'],
            'access_token_required': True,
        },
        'create_folder': {
            'url': '/oapi/library/folders/',
            # HACK: 'collection' is required, but by making it optional here it'll get POSTed
            # Unfortunately that means it needs to be a named param when calling this method
            'optional': ['folder'],
            'access_token_required': True,
            'method': 'post',
        },
        'delete_folder': {
            'url': '/oapi/library/folders/%(id)s/',
            'required': ['id'],
            'access_token_required': True,
            'method': 'delete',
        },
        'add_document_to_folder': {
            'url': '/oapi/library/folders/%(folder_id)s/%(document_id)s/',
            'required': ['folder_id', 'document_id'],
            'access_token_required': True,
            'method': 'post',
        },
        'delete_document_from_folder': {
            'url': '/oapi/library/folders/%(folder_id)s/%(document_id)s/',
            'required': ['folder_id', 'document_id'],
            'access_token_required': True,
            'method': 'delete',
        },
        # Groups methods #
        'groups': {
            'url': '/oapi/library/groups/',
            'access_token_required': True,
        },
        'group_documents': {
            'url': '/oapi/library/groups/%(id)s/',
            'required': ['id'],
            'optional': ['page', 'items'],
            'access_token_required': True,
        },
        'group_doc_details': {
            'url': '/oapi/library/groups/%(group_id)s/%(doc_id)s/',
            'required': ['group_id', 'doc_id'],
            'access_token_required': True,
        },
        'group_people': {
            'url': '/oapi/library/groups/%(id)s/people/',
            'required': ['id'],
            'access_token_required': True,
        },
        'create_group': {
            'url': '/oapi/library/groups/',
            'optional': ['group'],
            'access_token_required': True,
            'method': 'post',
        },
        'delete_group': {
            'url': '/oapi/library/groups/%(id)s/',
            'required': ['id'],
            'access_token_required': True,
            'method': 'delete',
        },
        'leave_group': {
            'url': '/oapi/library/groups/%(id)s/leave/',
            'required': ['id'],
            'access_token_required': True,
            'method': 'delete',
        },
        'unfollow_group': {
            'url': '/oapi/library/groups/%(id)s/unfollow/',
            'required': ['id'],
            'access_token_required': True,
            'method': 'delete',
        },
        'delete_group_document': {
            'url': '/oapi/library/groups/%(group_id)s/%(document_id)s/',
            'required': ['group_id', 'document_id'],
            'access_token_required': True,
            'method': 'delete',
        },
        ######## DEPRECATED METHODS ########
        # Deprecated
        'collections': {
            'url': '/oapi/library/collections/',
            'access_token_required': True,
        },
        # Deprecated
        'sharedcollections': {
            'url': '/oapi/library/sharedcollections/',
            'access_token_required': True,
        },
        # Deprecated
        'collection_documents': {
            'url': '/oapi/library/collections/%(id)s/',
            'required': ['id'],
            'optional': ['page', 'items'],
            'access_token_required': True,
        },
        # Deprecated
        'sharedcollection_documents': {
            'url': '/oapi/library/sharedcollections/%(id)s/',
            'required': ['id'],
            'optional': ['page', 'items'],
            'access_token_required': True,
        },
        # Deprecated
        'sharedcollection_members': {
            'url': '/oapi/library/sharedcollections/%(id)s/members/',
            'required': ['id'],
            'access_token_required': True,
        },
        # Deprecated
        'delete_collection': {
            'url': '/oapi/library/collections/%(id)s/',
            'required': ['id'],
            'access_token_required': True,
            'method': 'delete',
        },
        # Deprecated
        'delete_sharedcollection': {
            'url': '/oapi/library/sharedcollections/%(id)s/',
            'required': ['id'],
            'access_token_required': True,
            'method': 'delete',
        },
        # Deprecated
        'create_collection': {
            'url': '/oapi/library/collections/',
            # HACK: 'collection' is required, but by making it optional here it'll get POSTed
            # Unfortunately that means it needs to be a named param when calling this method
            'optional': ['collection'],
            'access_token_required': True,
            'method': 'post',
        },
        # Deprecated
        'create_sharedcollection': {
            'url': '/oapi/library/sharedcollections/',
            'optional': ['sharedcollection'],
            'access_token_required': True,
            'method': 'post',
        },
        # Deprecated
        'add_document_to_collection': {
            'url': '/oapi/library/collections/add/%(collection_id)s/%(document_id)s/',
            'required': ['collection_id', 'document_id'],
            'access_token_required': True,
            'method': 'post',
        },
        # Deprecated
        'remove_document_from_collection': {
            'url': '/oapi/library/collections/%(collection_id)s/%(document_id)s/',
            'required': ['collection_id', 'document_id'],
            'access_token_required': True,
            'method': 'delete',
        },
        # Deprecated
        'delete_sharedcollection_document': {
            'url': '/oapi/library/sharedcollections/%(collection_id)s/%(document_id)s/',
            'required': ['collection_id', 'document_id'],
            'access_token_required': True,
            'method': 'delete',
        }
    }

    def __init__(self, consumer_key, consumer_secret):
        """Create the OAuth transport and bind one callable per API method."""
        self.mendeley = OAuthClient(consumer_key, consumer_secret)
        # Create methods for all of the API calls
        for method, details in self.methods.items():
            setattr(self, method, MendeleyRemoteMethod(details, self.api_request))

    def api_request(self, url, access_token_required=False, method='get', params=None):
        """Perform an HTTP request against the API.

        Args:
            url: path (already interpolated) to request.
            access_token_required: pass the stored access token when True.
            method: 'get', 'put', 'delete' or anything else for POST.
            params: optional dict of query/body parameters.
        Returns the raw httplib response object.
        """
        if params is None:
            # Avoid the shared mutable default-argument pitfall.
            params = {}
        if access_token_required:
            access_token = self.access_token
        else:
            access_token = None

        if method == 'get':
            if params:
                url += "?%s" % urllib.urlencode(params)
            response = self.mendeley.get(url, access_token)
        elif method == 'delete':
            response = self.mendeley.delete(url, access_token)
        elif method == 'put':
            # File uploads: body + oauth_body_hash travel separately.
            headers = {'Content-disposition': 'attachment; filename="%s"' % params.get('file_name')}
            response = self.mendeley.put(url, access_token, params.get('data'), params.get('oauth_body_hash'), headers)
        else:
            response = self.mendeley.post(url, params, access_token)
        return response

    def get_required_keys(self):
        """Run the interactive OAuth dance: fetch a request token, have the
        user authorize it in a browser, then exchange it for an access token."""
        self.request_token = self.mendeley.request_token()
        auth_url = self.mendeley.authorize(self.request_token)
        print('Go to the following url to auth the token:\n%s' % (auth_url,))
        verifier = raw_input('Enter verification code: ')
        self.request_token.set_verifier(verifier)
        self.access_token = self.mendeley.access_token(self.request_token)

    def load_keys(self):
        """Restore pickled OAuth tokens; raises IOError if the file is absent."""
        # Context manager + binary mode: the file is always closed, and 'rb'
        # is what pickle expects (the old code leaked the file handle).
        with open('mendeley_api_keys.pkl', 'rb') as key_file:
            data = pickle.load(key_file)
        self.request_token = data['request_token']
        self.access_token = data['access_token']

    def save_keys(self):
        """Pickle the current OAuth tokens to mendeley_api_keys.pkl."""
        data = {'request_token': self.request_token, 'access_token': self.access_token}
        with open('mendeley_api_keys.pkl', 'wb') as key_file:
            pickle.dump(data, key_file)


  
import datetime
import hashlib
import logging
import pickle
import re
import simplejson
import os
import string
import sys
import time
import urllib
import wsgiref.handlers
import cgi

from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.runtime import apiproxy_errors

import transform_content

# NOTE(review): the API consumer key/secret are hard-coded in source; anyone
# with repository access can impersonate this app. TODO: move to config or
# environment.
mendeley = MendeleyClient('11c16fb7a31f44b76305c7dcaf8a880a04dbb5931', '01a184cb4b4de87c404040b1314bcf02')

# Reuse previously-saved OAuth tokens; on the first run (no pickle file yet)
# fall back to the interactive flow. NOTE(review): this executes at import
# time and get_required_keys() blocks on raw_input — confirm that is intended
# for an App Engine deployment.
try:
    mendeley.load_keys()
except IOError:
    mendeley.get_required_keys()
    mendeley.save_keys()

################################################################################

DEBUG = False
EXPIRATION_DELTA_SECONDS = 3600        # memcache TTL (seconds) for mirrored pages
EXPIRATION_RECENT_URLS_SECONDS = 90    # memcache TTL for the homepage "latest urls" list

## DEBUG = True
## EXPIRATION_DELTA_SECONDS = 10
## EXPIRATION_RECENT_URLS_SECONDS = 1

HTTP_PREFIX = "http://"
# NOTE(review): HTTPS_PREFIX has the same value as HTTP_PREFIX ("http://");
# this looks like a copy/paste slip for "https://" — confirm against its
# callers before changing.
HTTPS_PREFIX = "http://"

# Response headers stripped before serving a mirrored page.
IGNORE_HEADERS = frozenset([
  'set-cookie',
  'expires',
  'cache-control',

  # Ignore hop-by-hop headers
  'connection',
  'keep-alive',
  'proxy-authenticate',
  'proxy-authorization',
  'te',
  'trailers',
  'transfer-encoding',
  'upgrade',
])

# Content types whose bodies get their links rewritten (see transform_content).
TRANSFORMED_CONTENT_TYPES = frozenset([
  "text/html",
  "text/css",
])

# Our own hostnames; requests for these are refused to avoid mirror loops.
MIRROR_HOSTS = frozenset([
  'mirrorr.com',
  'mirrorrr.com',
  'www.mirrorr.com',
  'www.mirrorrr.com',
  'www1.mirrorrr.com',
  'www2.mirrorrr.com',
  'www3.mirrorrr.com',
])

# Mirrored pages larger than this (bytes) are truncated before caching.
MAX_CONTENT_SIZE = 10 ** 6

# URLs longer than this are ellipsized on the homepage listing.
MAX_URL_DISPLAY_LENGTH = 50

################################################################################

class MPub(object):
    """In-memory record of a Mendeley publication returned by the search API."""

    def __init__(self, uuid, title, abstract, authors, keywords, publication, tags, url, year, readers, type):
        self.uuid = uuid            # Mendeley document UUID; the identity key
        self.title = title
        self.abstract = abstract
        self.authors = authors      # list of (surname, forename) tuples, or None
        self.keywords = keywords    # set of keywords, or None
        # BUG FIX: `publication` was never stored — a duplicate
        # `self.title = title` assignment stood in its place.
        self.publication = publication
        self.tags = tags            # set of tags, or None
        self.url = url
        self.year = year
        self.readers = readers
        self.type = type

    def as_string(self):
        """Serialize selected fields as a JavaScript array literal fragment:
        [readers,year,"title","first author surname","type","url","abstract"].
        """
        quoted = [self.title.encode('ascii', 'ignore'),
                  self.authors[0][0].encode('ascii', 'ignore'),
                  self.type.encode('ascii', 'ignore'),
                  self.url.encode('ascii', 'ignore'),
                  # Escape double quotes so the abstract can't break the JS literal.
                  self.abstract.encode('ascii', 'ignore').replace('"', '\\"') if self.abstract else "No abstract found"]
        # (Local no longer named `string`, which shadowed the stdlib module.)
        fields = [str(self.readers), str(self.year)] + ['"' + str(x) + '"' for x in quoted]
        return "[" + ','.join(fields) + "]"

    def __str__(self):
        return "(Mendeley Paper Object: %s)" % self.uuid

    def __repr__(self):
        return "(Mendeley Paper Object: %s)" % self.uuid

    def __eq__(self, other):
        """Publications are equal iff their Mendeley UUIDs match."""
        if isinstance(other, self.__class__):
            return self.uuid == other.uuid
        # BUG FIX: previously `return false` (undefined name), which raised
        # NameError whenever compared against a non-MPub value.
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Keep hashing consistent with __eq__ (both keyed on uuid).
        return hash(self.uuid)

def get_mendeley_docs(url):
  """Search Mendeley for papers related to the page at *url*.

  The last path component of the URL is split on '_' to form the search
  keyword (e.g. '.../wiki/Machine_learning' -> 'machine learning').

  Returns a list of MPub objects (at most 5, from the first results page).
  """
  url_words = url.lower().rsplit('/', 1)[1].split('_')
  # ' '.join replaces the deprecated string.join(), whose default separator
  # is also a single space.
  keyword = ' '.join(url_words)
  logging.warning(keyword)

  # Debug chatter: logs the category tree on every call, at the cost of one
  # API round-trip per category. NOTE(review): subcategories() is passed a
  # list index, not a category id — verify, and consider removing this loop
  # in production.
  categories = mendeley.categories()
  logging.warning(categories)
  for i in range(len(categories)):
    logging.warning(mendeley.subcategories(i))

  docs = mendeley.search(keyword, items=5)
  pub_list = []

  for doc in docs['documents']:
    uuid = doc['uuid']
    doc_details = mendeley.details(uuid)

    title = doc_details.get('title', None)
    abstract = doc_details.get('abstract', None)

    author_data = doc_details.get('authors', None)
    if author_data:
      authors = [(record['surname'], record['forename']) for record in author_data]
    else:
      authors = None

    kw_data = doc_details.get('keywords', None)
    keywords = set(kw_data) if kw_data else None

    tag_data = doc_details.get('tags', None)
    tags = set(tag_data) if tag_data else None

    publication = doc_details.get('publication_outlet', None)

    url = doc_details.get('mendeley_url', None)
    # BUG FIX: int(None) raised TypeError whenever 'year', 'stats' or
    # 'readers' were absent from the response; keep None instead.
    year_raw = doc_details.get('year', None)
    year = int(year_raw) if year_raw is not None else None
    stats = doc_details.get('stats', None) or {}
    readers_raw = stats.get('readers', None)
    readers = int(readers_raw) if readers_raw is not None else None
    type = doc_details.get('type', None)
    dis_data = stats.get('discipline', None)
    if dis_data:
      disciplines = [record['name'] for record in dis_data]
      dis_values = [record['value'] for record in dis_data]
    else:
      disciplines = None
      dis_values = None
    # NOTE(review): disciplines/dis_values are computed but never used.

    pub_list.append(MPub(uuid, title, abstract, authors, keywords, publication,
                         tags, url, year, readers, type))

  return pub_list


################################################################################

def get_url_key_name(url):
  """Return the cache/datastore key for *url*: 'hash_' + its SHA-256 hex digest."""
  return "hash_" + hashlib.sha256(url).hexdigest()

################################################################################

class EntryPoint(db.Model):
  """Datastore record of a URL mirrored via this app (drives the homepage list)."""
  translated_address = db.TextProperty(required=True)
  # Auto-updated on every put(); orders the "latest urls" GQL query.
  last_updated = db.DateTimeProperty(auto_now=True)
  # Truncated display form; filled in at render time only (see HomeHandler),
  # never persisted with a meaningful value.
  display_address = db.TextProperty()


class MirroredContent(object):
  """A fetched remote page: status, filtered headers and (transformed) body.

  Instances are cached in memcache, keyed by a hash of the mirrored URL
  (see get_url_key_name / fetch_and_store).
  """

  def __init__(self, original_address, translated_address,
               status, headers, data, base_url):
    self.original_address = original_address      # URL of the upstream page
    self.translated_address = translated_address  # URL of the mirrored copy on this app
    self.status = status                          # upstream HTTP status code
    self.headers = headers                        # lower-cased, filtered response headers
    self.data = data                              # page body (possibly rewritten/augmented)
    self.base_url = base_url                      # hostname being mirrored

  @staticmethod
  def get_by_key_name(key_name):
    """Return the cached MirroredContent for key_name, or None on a miss."""
    return memcache.get(key_name)

  @staticmethod
  def fetch_and_store(key_name, base_url, translated_address, mirrored_url):
    """Fetch and cache a page.
    
    Args:
      key_name: Hash to use to store the cached page.
      base_url: The hostname of the page that's being mirrored.
      translated_address: The URL of the mirrored page on this site.
      mirrored_url: The URL of the original page. Hostname should match
        the base_url.
    
    Returns:
      A new MirroredContent object, if the page was successfully retrieved.
      None if any errors occurred or the content could not be retrieved.
    """
    # Refuse to mirror one of our own hostnames, to break request loops.
    if base_url in MIRROR_HOSTS:
      logging.warning('Encountered recursive request for "%s"; ignoring',
                      mirrored_url)
      return None
    
    try:
      response = urlfetch.fetch(mirrored_url)               # Google API that fetches web page info for a given URL
    except (urlfetch.Error, apiproxy_errors.Error):
      logging.exception("Could not fetch URL")
      return None

    # Keep only headers we are willing to forward: drop cookies, caching
    # directives and hop-by-hop headers (see IGNORE_HEADERS).
    adjusted_headers = {}
    for key, value in response.headers.iteritems():         # HTTP Response Headers
      adjusted_key = key.lower()
      if adjusted_key not in IGNORE_HEADERS:
        adjusted_headers[adjusted_key] = value              # Store the relevant ones.

    # m_docs_str = "[['abc',1,'def'],['abc',1,'def'],['abc',1,'def']]"
    # THIS IS WHERE MENDELEY RELEVANT DOC INFO IS STORED.
    # IN A LATER VERSION, IDEALLY WE COULD SEND THIS ELSEWHERE, DO THE COMPUTATION
    # AND SEND IT BACK?

    #logging.debug("Fetching '%s'", mirrored_url)
    
    content = response.content                                                  # Page content - woo! This is what we can screw with
    page_content_type = adjusted_headers.get("content-type", "")
    # Rewrite links inside HTML/CSS so they keep pointing at this mirror.
    for content_type in TRANSFORMED_CONTENT_TYPES:
      # Startswith() because there could be a 'charset=UTF-8' in the header.
      if page_content_type.startswith(content_type):
        content = transform_content.TransformContent(base_url, mirrored_url,
                                                     content)
        break

    # If the transformed content is over 1MB, truncate it (yikes!)
    if len(content) > MAX_CONTENT_SIZE:
      logging.warning('Content is over 1MB; truncating')
      content = content[:MAX_CONTENT_SIZE]


    
    # Wikipedia pages ('/wiki/' in the URL) get the WikiRadar panel: CSS/JS
    # plus a JS-array blob of related Mendeley papers spliced into the page.
    if mirrored_url.find('/wiki/') != -1:
      m_docs = get_mendeley_docs(mirrored_url)
      m_docs_str = '[' + ','.join([doc.as_string() for doc in m_docs]) + ']'
      logging.warning(m_docs_str)
      
      css_collapse = '<link type="text/css" rel="stylesheet" media="all" href="/static/diQuery-collapsiblePanel.css">'
      css_wikiradar = '<link type="text/css" rel="stylesheet" media="all" href="/static/wikiradar.css">'
      css_files = [css_collapse, css_wikiradar]
      
      script_lang = '<script type="text/javascript">var languagePath = "enwiki"; </script>'
      script_jquery = '<script type="text/javascript" src="http://sanjaykairam.com/projects/wikiradar/wr-scripts/wr-scripts/jquery-1.6.1.min.js"> </script>'
      script_proto = '<script type="text/javascript" src="http://sanjaykairam.com/projects/wikiradar/wr-scripts/wr-scripts/protovis/protovis-d3.2.js"></script>'
      # script_wr = '<script type="text/javascript" src="http://sanjaykairam.com/projects/wikiradar/wr-scripts/wr-scripts/wikiradar.js"></script>'
      # script_wr = '<script type="text/javascript" src="http://www.stanford.edu/~skandel/wiki/wikiradar2.js"></script>'
      script_collapse = '<script type="text/javascript" src="/static/diQuery-collapsiblePanel.js"></script>'
      script_wr = '<script type="text/javascript" src="/static/wikiradar2.js"></script>'
      script_dt = '<script type="text/javascript" src="http://www.stanford.edu/~skandel/wiki/jquery.Datatable.js"></script>'
      script_lj = '<script type="text/javascript" src="http://www.stanford.edu/~skandel/wiki/jquery.layout.min.js"></script>'

      script_vis = '<script type="text/javascript" src="http://www.google.com/jsapi"></script>'
      script_goog = '<script type="text/javascript">google.load(\'visualization\', \'1\', {packages: [\'table\']}); </script>'
      
      # Embeds the Mendeley results as a JS global, populated by custom().
      script_data = '<script type="text/javascript"> var mendeleyData; var custom = function(){mendeleyData='+m_docs_str+'};'+'</script>'


      scripts = [script_lang, script_jquery, script_proto, script_data, script_collapse, script_vis, script_goog, script_lj, script_wr, script_dt]
      
      # NOTE(review): each find() below assumes its marker exists; if one is
      # missing, find() returns -1 and the slice arithmetic silently garbles
      # the page — confirm the markers always appear in mirrored wiki pages.
      s_marker = '</head>'
      s_marker_loc = content.find(s_marker)

      # Inject the CSS and script tags just before </head>.
      temp_content = content[:s_marker_loc] + '\n'
      for css in css_files:
        temp_content = temp_content + css + '\n' 
      for script in scripts:
        temp_content = temp_content + script + '\n' 
      content = temp_content + content[s_marker_loc:]
      
      # Hook draw()/custom() into the <body> onLoad handler.
      b_marker = '<body'
      b_marker_loc = content.find(b_marker) + len(b_marker)
      content = content[:b_marker_loc] + ' onLoad="draw();custom();"' + content[b_marker_loc:]
      
      # Insert the collapsible WikiRadar panel ahead of the page navigation.
      c_marker = '<div id="jump-to-nav">'
      c_marker_loc = content.find(c_marker)
      content = content[:c_marker_loc] + '<div class="collapsibleContainer" title="Example Collapsible Panel" style="padding:10px"><div id="radar-top" width="1000" height="150"><table id="dtable"></table><div id="chart_div"></div><div id="details"></div><div id="clear_div"></div></div></div>\n' + content[c_marker_loc:]
      #content = content[:c_marker_loc] + '<div id="radar-bottom"></div>\n' + content[c_marker_loc:]
    
    new_content = MirroredContent(
      base_url=base_url,
      original_address=mirrored_url,
      translated_address=translated_address,
      status=response.status_code,
      headers=adjusted_headers,
      data=content)
    # Cache for EXPIRATION_DELTA_SECONDS; a failed add is logged but the
    # freshly-built content is still returned to the caller.
    if not memcache.add(key_name, new_content, time=EXPIRATION_DELTA_SECONDS):
      logging.error('memcache.add failed: key_name = "%s", '
                    'original_url = "%s"', key_name, mirrored_url)
      
    return new_content
################################################################################

class BaseHandler(webapp.RequestHandler):
  """Shared helpers for the request handlers in this module."""

  def get_relative_url(self):
    """Return the path-and-query portion of the current request URL.

    Skips past the scheme and host (e.g. 'http://example.com') and
    returns everything from the first path slash onward, or '/' when
    the URL carries no path component at all.
    """
    prefix_len = len(self.request.scheme + "://")
    cut = self.request.url.find("/", prefix_len)
    if cut < 0:
      return "/"
    return self.request.url[cut:]


class HomeHandler(BaseHandler):
  """Serves the landing page and dispatches the URL entry form."""

  def get(self):
    # A submitted form value redirects the browser to the mirrored path.
    form_url = self.request.get("url")
    if form_url:
      # Tolerate a pasted URL that still carries its 'http://' scheme.
      inputted_url = urllib.unquote(form_url)
      if inputted_url.startswith(HTTP_PREFIX):
        inputted_url = inputted_url[len(HTTP_PREFIX):]
      return self.redirect("/" + inputted_url)

    latest_urls = memcache.get('latest_urls')
    if latest_urls is None:
      latest_urls = EntryPoint.gql("ORDER BY last_updated DESC").fetch(25)

      # Attach a truncated display address (plus ellipsis) for the
      # template. The attribute is transient and never persisted.
      for entry_point in latest_urls:
        display = entry_point.translated_address[:MAX_URL_DISPLAY_LENGTH]
        if len(display) == MAX_URL_DISPLAY_LENGTH:
          display += '...'
        entry_point.display_address = display

      if not memcache.add('latest_urls', latest_urls,
                          time=EXPIRATION_RECENT_URLS_SECONDS):
        logging.error('memcache.add failed: latest_urls')

    # Assemble the template context here so presentation stays
    # decoupled from how the data is stored.
    secure_url = None
    if self.request.scheme == "http":
      secure_url = "https://mirrorrr.appspot.com"
    context = {
      "latest_urls": latest_urls,
      "secure_url": secure_url,
    }
    self.response.out.write(template.render("main.html", context))


class MirrorHandler(BaseHandler):
  """Fetches, caches, and serves a mirrored copy of the requested URL."""

  def get(self, base_url):
    """Serve the mirrored page for the address captured in base_url.

    Responds 404 when the upstream fetch fails; otherwise replays the
    cached headers and body. On a cache miss coming from an external
    (non-local) referrer, the URL is also recorded as an EntryPoint so
    it can appear on the front page.
    """
    assert base_url

    # Log the user-agent and referrer, to see who is linking to us.
    logging.debug('User-Agent = "%s", Referrer = "%s"',
                  self.request.user_agent,
                  self.request.referer)
    logging.debug('Base_url = "%s", url = "%s"', base_url, self.request.url)

    translated_address = self.get_relative_url()[1:]  # remove leading /
    mirrored_url = HTTP_PREFIX + translated_address

    # Use sha256 hash instead of mirrored url for the key name, since key
    # names can only be 500 bytes in length; URLs may be up to 2KB.
    key_name = get_url_key_name(mirrored_url)
    logging.info("Handling request for '%s' = '%s'", mirrored_url, key_name)

    content = MirroredContent.get_by_key_name(key_name)
    cache_miss = False
    if content is None:
      logging.debug("Cache miss")
      cache_miss = True
      content = MirroredContent.fetch_and_store(key_name, base_url,
                                                translated_address,
                                                mirrored_url)
    if content is None:
      return self.error(404)

    # Store the entry point down here, once we know the request is good and
    # there has been a cache miss (i.e., the page expired). If the referrer
    # wasn't local, or it was '/', then this is an entry point.
    if (cache_miss and
        'Googlebot' not in self.request.user_agent and
        'Slurp' not in self.request.user_agent and
        (not self.request.referer.startswith(self.request.host_url) or
         self.request.referer == self.request.host_url + "/")):
      # Ignore favicons as entry points; they're a common browser fetch on
      # every request for a new site that we need to special case them here.
      if not self.request.url.endswith("favicon.ico"):
        logging.info("Inserting new entry point")
        entry_point = EntryPoint(
          key_name=key_name,
          translated_address=translated_address)
        try:
          entry_point.put()
        except (db.Error, apiproxy_errors.Error):
          logging.exception("Could not insert EntryPoint")

    for key, value in content.headers.iteritems():
      self.response.headers[key] = value
    if not DEBUG:
      self.response.headers['cache-control'] = \
        'max-age=%d' % EXPIRATION_DELTA_SECONDS
    # Bug fix: the body was previously written twice in a row, doubling
    # the payload and breaking clients that trust Content-Length.
    self.response.out.write(content.data)


class AdminHandler(webapp.RequestHandler):
  """Exposes memcache statistics as a plain-text diagnostics page."""

  def get(self):
    stats = memcache.get_stats()
    self.response.headers['content-type'] = 'text/plain'
    self.response.out.write(str(stats))


class KaboomHandler(webapp.RequestHandler):
  """Flushes the entire memcache; hitting /kaboom wipes all cached pages."""

  def get(self):
    self.response.headers['content-type'] = 'text/plain'
    flushed = memcache.flush_all()
    self.response.out.write('Flush successful: %s' % flushed)


class CleanupHandler(webapp.RequestHandler):
  """Deletes EntryPoint records in batches of 25, oldest first."""

  def get(self):
    keep_cleaning = True
    try:
      stale = EntryPoint.gql('ORDER BY last_updated').fetch(25)
      keep_cleaning = (len(stale) > 0)
      db.delete(stale)

      if stale:
        message = "Deleted %d entities" % len(stale)
      else:
        keep_cleaning = False
        message = "Done"
    except (db.Error, apiproxy_errors.Error), e:
      # Datastore hiccups are transient; signal the page to retry.
      keep_cleaning = True
      message = "%s: %s" % (e.__class__, e)

    context = {
      'keep_cleaning': keep_cleaning,
      'message': message,
    }
    self.response.out.write(template.render('cleanup.html', context))

################################################################################

# URL routing table. Order matters: the catch-all mirror pattern must
# stay last so the named endpoints above it take precedence.
app = webapp.WSGIApplication([
  (r"/", HomeHandler),
  (r"/main", HomeHandler),
  (r"/kaboom", KaboomHandler),
  (r"/admin", AdminHandler),
  (r"/cleanup", CleanupHandler),
  (r"/([^/]+).*", MirrorHandler)
], debug=DEBUG)


def main():
  # Run the WSGI app under the CGI adapter — the classic App Engine
  # Python runtime entry point.
  wsgiref.handlers.CGIHandler().run(app)


if __name__ == "__main__":
  main()
