#!/usr/bin/env python
# encoding: utf-8
"""
FullTextSearch.py

Created by Rui Carmo on 2006-08-19.
Published under the MIT license.
"""

from snakeserver.snakelet import Snakelet
import os, sys, time, rfc822, unittest, urlparse, urllib, re, stat, cgi
from sets import Set
from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup
from Page import Page
from Store import Store, BASE_FILENAME
from Cache import Cache
from Utils import *
from Layout import *
from Plugins import *
import fetch, simplejson
import Locale

# try to speed up pickle if possible
try:
  import cPickle as pickle
except ImportError: # fall back on Python version
  import pickle 

def renderInfo(i18n, headers):
  """Build the localized "created on ... by ..." byline for a page.

  Compares the page's creation date against its last-modified date and
  appends either a "never updated" note or an "updated N ago" note.
  """
  created = headers['date']
  modified = headers['last-modified']
  info = i18n['created_on_format'] % (plainTime(i18n, created), headers['from'])
  if created == modified:
    info += ", %s." % i18n['not_updated']
  elif created < modified:
    info += ", %s." % (i18n['updated_ago_format'] % timeSince(i18n, modified))
  return info


def subRender(c, page, request, response):
  """Render a page to HTML and run all registered plugins over the soup.

  Parses the rendered markup, dispatches each <plugin> tag to its handler,
  then lets tag-level plugins post-process the whole tree. Returns the
  final markup as a unicode string.
  """
  markup = page.render(c.defaultmarkup)
  soup = BeautifulSoup(markup, selfClosingTags=['plugin'], convertEntities=['html','xml'])
  name = page.headers['name']
  for plugin_tag in soup('plugin'):
    c.plugins.run(plugin_tag, 'plugin', name, soup, request, response)
  c.plugins.runForAllTags(name, soup, request, response)
  return unicode(soup.renderContents(), 'utf-8')

def renderPage(c, page, request = None, response = None, cache = True):
  """Auxiliary function invoked from Indexer and Engine

  Renders a page to HTML via subRender() and memoizes the result in the
  application cache under a "soup:"-prefixed key.

  c        -- application context (provides .cache, .store, .plugins)
  page     -- page object with .headers and .render()
  request  -- HTTP request object, None, or the literal False (False marks
              a feed/batch rendering, which gets a distinct cache key so it
              never collides with the online-view rendering of the page)
  response -- HTTP response object (passed through to plugins)
  cache    -- when False, bypass the cache entirely and render afresh
  """
  if request is False:
    # page rendered within a feed or batch context
    key = "soup:" + '_' + page.headers['name']
  else:
    # page rendered for online viewing or indexing
    key = "soup:" + page.headers['name']
  if not cache:
    return subRender(c,page,request,response)
  else:
    # Honor a per-page "x-cache-control: max-age=N" header by expiring the
    # cached rendering once it is older than N seconds.
    if "x-cache-control" in page.headers.keys():
      control = page.headers["x-cache-control"].lower()
      m = MAX_AGE_REGEX.match(control)
      if m:
        # NOTE(review): assumes group 3 of MAX_AGE_REGEX (defined in Utils)
        # captures the max-age seconds value -- confirm against Utils.
        seconds = int(m.group(3))
        try:
          if (c.cache.mtime(key) + seconds) < time.time():
            del(c.cache[key])
        except KeyError:
          pass  # not cached yet, nothing to expire
    try:
      # Invalidate the cached rendering when the stored page is newer.
      if c.store.mtime(page.headers['name']) > c.cache.mtime(key):
        del(c.cache[key])
        raise KeyError
      else:
        return c.cache[key]
    except KeyError:
      # Cache miss (or just invalidated): render and store in one step.
      c.cache[key] = buffer = subRender(c,page,request,response)
    return buffer
  # end else



class DeliciousImporter(Snakelet):
  """Link blog"""
  
  def requiresSession(self):
    return self.SESSION_NOT_NEEDED

  def serve(self, request, response):
    response.getOutput().write("nothing here.")
  
  def getDescription(self):
    return "del.icio.us link blog retriever"

  def init(self):
    try:
      delicious = self.getWebApp().getConfigItem('del.icio.us')
      self.url = delicious['url']
      self.authors = delicious['authors']
      self.format = delicious['format']
    except:
      print "Invalid or missing del.icio.us settings, retrieval inactive"
      return
    scheduler = self.getPlugin("SchedulerPlugin")
    # schedule fetch to occur every half an hour from now
    scheduler.addIntervalTask(self.fetchLinks, "Update linkblog", 5, 1800, scheduler.PM_SEQUENTIAL, [])

  def fetchLinks(self):
    # remove some del.icio.us tags
    remove = ['for:links','post:links']
    store = self.getAppContext().store
    
    result = fetch.fetchURL(self.url)
    
    if self.format == 'rss':
      # remove escape chars from the whole buffer and convert all entities after parsing
      soup = BeautifulSoup(result['data'].replace("#x26;",""), convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
      for item in soup('item'):
        try: 
          author = self.authors[item('dc:creator')[0].contents[0]]
        except:
          author = self.getWebApp().getConfigItem('author')
        try:
          when = time.strptime(item('dc:date')[0].contents[0],'%Y-%m-%dT%H:%M:%SZ')
          uri = item['rdf:about']
          _headers = "X-Link: %s" % uri
          title = item('title')[0].contents[0]
          content = item('description')[0].contents[0]
          date = time.strftime("%Y-%m-%d %H:%M:%S", when)
          tags = ', '.join([x for x in item('dc:subject')[0].contents[0].split(' ') if x not in remove])
          keywords = tags = tags.replace(',,',',') # remove double commas
          page = time.strftime("links/%Y/%m/%d/%H%M",when)
          categories = "Links"
          markup = self.getWebApp().getConfigItem('markup')
          if not store.mtime(page):
            store.updatePage(page,vars())
        except:
          pass
    elif self.format == 'json':
      nuts = simplejson.loads(result['data'])
      for item in nuts['value']['items']:
        try: 
          author = self.authors[item['dc:creator']]
        except:
          author = self.getWebApp().getConfigItem('author')
        try:
          when = time.strptime(item['dc:date'],'%Y-%m-%dT%H:%M:%SZ')
          uri = item['rdf:about']
          _headers = "X-Link: %s" % uri
          title = item['title']
          content = item['description']
          date = time.strftime("%Y-%m-%d %H:%M:%S", when)
          tags = ', '.join([x for x in item['dc:subject'].split(' ') if x not in remove])
          keywords = tags = tags.replace(',,',',') # remove double commas
          page = time.strftime("links/%Y/%m/%d/%H%M",when)
          categories = "Links"
          markup = self.getWebApp().getConfigItem('markup')
          if not store.mtime(page):
            store.updatePage(page,vars())
        except:
          pass
      
class Attachment(Snakelet):
  """Attachment requests

  Resolves /page/attachment paths to files in the store and serves them
  with one-day cache headers.
  """
  def getDescription(self):
    return "Wiki Attachment Locator"

  def allowCaching(self):
    return False # we want to handle it ourselves
    
  def requiresSession(self):
    return self.SESSION_NOT_NEEDED
  
  def serve(self, request, response):
    """Serve one attachment, or a 404 if it cannot be located.

    Bug fix: the original fell through to setResponse(404) even after a
    successful serveStaticFile(), clobbering every good response with a
    404 -- we now return immediately after serving the file.
    """
    request.setEncoding("UTF-8")
    response.setEncoding("UTF-8")
    a = self.getWebApp()
    c = request.getContext()
    c.fullurl = request.getBaseURL() + request.getFullQueryArgs()
    # path is "page/attachment"; split off the attachment filename
    path = urllib.unquote((request.getPathInfo())[1:])
    (page, attachment) = os.path.split(path)
    if attachment != BASE_FILENAME:
      c = self.getAppContext()
      filename = c.store.getAttachmentFilename(page, attachment)
      if os.path.exists(filename):
        stats = os.stat(filename)
        # attachments change rarely: allow client caching for one day
        response.setHeader("Cache-Control", 'max-age=86400')
        response.setHeader("Expires", httpTime(time.time() + 86400))
        (etag, lmod) = a.create_ETag_LMod_headers(stats.st_mtime, stats.st_size, stats.st_ino)
        response.setHeader("Last-Modified", lmod)
        response.setHeader("Etag", etag)
        a.serveStaticFile(filename, response, useResponseHeaders=False)
        return None  # file served -- do NOT fall through to the 404 below
    response.setResponse(404, "Not Found")
    return None

class Wiki(Snakelet):
  """Wiki Engine

  Main page-serving snakelet: resolves the requested path to a page,
  renders it (via renderPage, with caching), maintains the session page
  trail and "see also" cross-references, builds cache/conditional-GET
  headers, and hands the assembled context off to the /wiki.y template.
  """
  def getDescription(self):
    return "Wiki Engine"

  def allowCaching(self):
    return False # we want to handle it ourselves

  def serve(self, request, response):
    """Serve one wiki page request.

    Fills the request context (c.*) with everything wiki.y needs
    (title, body, trail, see-also links, cache headers) and redirects
    to that template. A raised Warning carries a status message; the
    page is then re-fetched and the message exposed as c.status.
    """
    request.setEncoding("UTF-8")
    response.setEncoding("UTF-8")
    ac = self.getAppContext()
    a = self.getWebApp()
    # While the background indexer is still warming up, park clients on a
    # "please wait" page instead of serving incomplete results.
    if ac.indexer.ready != True:
      print "redirecting"
      ac = request.getContext()
      response.setHeader("X-Dialtone",'Busy, Please Hold')
      self.redirect('/wait.y', request, response)
      return
    
    c = request.getContext()
    c.fullurl = request.getBaseURL() + request.getFullQueryArgs()
    self.i18n = Locale.i18n[ac.locale]
    try:
      # parse page name out of URL
      c.path = (request.getPathInfo())[1:]
      
      # If there was no specific page requested, then render the HomePage
      if c.path == '':
        c.path = 'HomePage'
      
      # Get the actual page contents
      page = self.getPage(request, response)
      
      # Try to fail gracefully ASAP
      # (getPage returns None for 304s and redirects -- nothing left to do)
      if page == None:
        out = response.getOutput()
        print >>out,""
        return

      (c.headers, c.content) = page
      c.title = c.headers['title']
      c.postinfo = renderInfo(self.i18n, c.headers)
      
      # Manage page trail (breadcrumbs) 
      r = request.getSessionContext()
      if r is not None:
        try:
          # append this page and cap the trail at the last 10 entries
          if c.headers['name'] not in r.trail:
            r.trail.append(c.headers['name'])
          if len(r.trail) > 10:
            r.trail = r.trail[-10:]
        except:
          # no trail yet on this session -- start one
          r.trail = [c.headers['name']]
      try:
        # Render the trail as links; any missing index entry (or a None
        # session) aborts the whole trail via the except below.
        trail = []
        for crumb in r.trail:
          info = ac.indexer.pageinfo[crumb]
          # add absolute URL prefix to link
          info['link'] = ac.base + info['name']
          trail.append(info)
        c.trail = '<p>' + self.i18n['pagetrail'] + ': ' + pagetrail(trail[-10:]) + '</p>'
      except:
        c.trail = ''
      
      # Build the "see also" table from backlinks plus outbound wiki links.
      references = {}
      c.seealso = ""
      try:
        links = ac.indexer.backlinks[c.headers['name']]
        links.extend(ac.indexer.wikilinks[c.headers['name']])
        # deduplicate while draining the Set
        unique = Set(links)
        links = [unique.pop() for i in range(0,len(unique))]
        if len(links) > 0:
          for link in links:
            references[ac.base+link] = ac.indexer.pageinfo[link]
          c.seealso = "<p>%s</p>" % self.i18n['seealso'] + linktable(self.i18n,references)  
      except KeyError:
        # index entries not ready yet -- tell the user indexing is ongoing
        c.seealso = '<div class="warning">' + self.i18n['indexing_message'] + '</div>'
          
      # Per-page x-cache-control overrides the site-wide maxage setting.
      maxage = self.getWebApp().getConfigItem('maxage')
      if 'x-cache-control' in c.headers.keys():
        c.cachecontrol = "public, " + c.headers['x-cache-control']
        m = MAX_AGE_REGEX.match(c.headers['x-cache-control'])
        if m:
          # NOTE(review): assumes group 3 of MAX_AGE_REGEX holds the
          # seconds value -- defined in Utils, confirm there.
          maxage = int(m.group(3))
      # Use cache metadata to generate HTTP headers
      try:
        stats = ac.cache.stats("soup:"+c.path)
        (c.etag,c.lastmodified) = a.create_ETag_LMod_headers(stats.st_mtime, stats.st_size, stats.st_ino) 
      except:
        # page not in the render cache -- emit weak/no validators
        c.etag = ''
        c.lastmodified =  httpTime(time.time())
        c.cachecontrol = ''
      
      # If we're not running indexing, then pages should be cached a bit longer
      if ac.indexer.done:
        c.expires = httpTime(time.time() + maxage)
      else:
        c.expires = httpTime(time.time())
      
      # The Answer, obviously
      response.setHeader("X-Answer",'42')
      
      # Generate c.comments
      formatComments(ac,request,c.path)

      posttitle = c.title
      permalink = ac.base + c.path
      description = self.i18n['permalink_description']
      linkclass = "wikilink"
      # Insert outbound links if necessary
      # (an x-link header makes the permalink point at the external URI,
      # styled per URI schema)
      if "x-link" in c.headers:
        uri = c.headers['x-link']
        (schema,netloc,path,parameters,query,fragment) = urlparse.urlparse(uri)
        permalink = uri
        #posttitle = self.i18n[schema]['title'] % {'uri':uri}
        linkclass   = self.i18n['uri_schemas'][schema]['class']
        description = self.i18n['external_link_format'] % cgi.escape(uri)
      postinfo = c.postinfo
      content = c.content
      comments = c.comments
      # Use a simplified format for the HomePage (less cruft)
      # NOTE: the templates are %-formatted with locals(), so the plain
      # local names above (posttitle, permalink, content, ...) are the
      # template's substitution variables.
      if c.path == "HomePage":
        c.postbody = ac.templates['simplified'] % locals()
      else:
        c.postbody = ac.templates['generic'] % locals()
      c.sitename = ac.siteinfo['sitename']
      c.sitedescription = ac.siteinfo['sitedescription']
      self.redirect('/wiki.y', request, response)
    except Warning, e:
      # Warnings are used as control flow: surface the message and
      # re-render the page with it.
      c.status = e.value
      (c.headers, c.content) = self.getPage(request, response)
      self.redirect('/wiki.y', request, response)
  
  def requiresSession(self):
    return self.SESSION_WANTED
  
  def dumpTable(self, request):
    """Render all request headers as an HTML table (debug helper).

    Alternates an "odd" CSS class on every other row.
    """
    h=request.getAllHeaders()
    buffer = '<table class="data">'
    i=0
    for k, v in  h.items():
      style = ('',' class="odd"')[i%2]
      buffer = buffer + '<tr><td%s>%s</td><td%s>%s</td></tr>' % (style, k, style, v)
      i = i + 1
    buffer = buffer + '</table>'
    return buffer
  
  def getMarkup(self, request, response):
    """Return the raw (unrendered) markup of the requested page.

    Falls back to meta/EmptyPage when the page cannot be loaded.
    """
    path = (request.getPathInfo())[1:]
    c = self.getAppContext()
    try:
      page = c.store.getRevision(path)
    except:
      page = c.store.getRevision("meta/EmptyPage")
    buffer = page.body
    return buffer
  
  def getPage(self, request, response):
    """TODO: Change this to access the Indexer instead of the Store

    Resolve the request path to a rendered page.

    Returns (headers, rendered_html), or None when the response has
    already been completed here (304 Not Modified, or an HTTP redirect
    for standing redirects / aliases). May also return the EmptyPage
    placeholder when the path resolves to nothing.
    """
    path = urllib.unquote((request.getPathInfo())[1:])
    # replace the above with:
    # path = '/'.join(request.getRequestURL().split('/')[2:])
    # if the snakelet matches an arbitrary pattern
    if path == '':
      path = 'HomePage'
    a = self.getWebApp()
    ac = self.getAppContext()
    # Conditional GET: answer 304 when the cached rendering is older than
    # the client's If-Modified-Since timestamp.
    buffer = request.getHeader('If-Modified-Since')
    if buffer != None:
      since = time.mktime(rfc822.parsedate(buffer))
      try:
        # see if our page has been rendered and has a modification time
        our = ac.cache.mtime('soup:' + path)
        if(since > our):
          # Reset some headers
          response.setHeader("Cache-Control",'')
          response.setHeader("Pragma",'')
          response.setHeader("Expires",'')
          # Say bye bye
          response.setResponse(304, "Not Modified")
          return None
      except KeyError:
        pass  # not in the cache -- cannot answer 304, fall through
    
    # Check for any standing redirects
    redirect = self.checkRedirects(ac,path)
    if redirect:
      response.HTTPredirect(ac.base + redirect)
      return None
      
    # Check for a URL variant
    try:
      page = ac.store.getRevision(path)
    except IOError:
      # page not in the store: try an alias, else serve the placeholder
      alias = ac.indexer.resolveAlias(path)
      if alias != path:
        response.HTTPredirect(ac.base + alias)
        return
      else:
        page = ac.store.getRevision("meta/EmptyPage")
        return (page.headers, renderPage(ac,page,request,response,ac.indexer.done))
    # An x-redirect header sends the client elsewhere: to the raw URI when
    # its schema is known, otherwise treated as a wiki-internal path.
    if 'x-redirect' in page.headers.keys():
      uri = page.headers['x-redirect']
      (schema,netloc,path,parameters,query,fragment) = urlparse.urlparse(uri)
      if schema in self.i18n['uri_schemas'].keys():
        path = uri
      else:
        path = ac.base + path
      response.HTTPredirect(path)
    return (page.headers, renderPage(ac,page,request,response,ac.indexer.done))

  def checkRedirects(self, appcontext, page):
    """Apply the configured regex redirect table to a page name.

    Returns the rewritten name on the first pattern that changes it,
    or None when no redirects are configured or none match.
    """
    try:
      redirects = appcontext.redirects
    except:
      return None
    for pattern in redirects.keys():
      redirect = re.sub(pattern,redirects[pattern], page)
      # cmp() is nonzero when the substitution actually changed the name
      if cmp(redirect,page):
        return redirect
    return None
