#!/usr/bin/env python
# encoding: utf-8
"""
Feeds.py

Created by Rui Carmo on 2007-05-11.
Published under the MIT license.
"""

from snakeserver.snakelet import Snakelet
from BeautifulSoup import *
from Engine import renderPage
from Store import Store
from Utils import *
from Layout import *
import os, time, cgi, re

exclusions = ['^HomePage$','^meta.+']

def filtered(name,namespace,exclusions):
  """
  Decide whether a page name belongs in the feed for a namespace.

  Returns False when any exclusion pattern matches the name; otherwise
  True exactly when the name starts with the namespace prefix followed
  by at least one more character. Note that namespace is interpolated
  into a regex verbatim, so callers may pass patterns such as '.+'.
  """
  excluded = any(re.match(pattern, name) for pattern in exclusions)
  if excluded:
    return False
  return re.match("^%s.+" % namespace, name) is not None

class RSS(Snakelet):
  """
  Feed Generator

  Serves an RSS feed of recently changed wiki pages. The URL pattern is
  /feeds/<namespace>; rendered XML is cached in the app context cache
  under the key 'feeds:<namespace>' for self.ttl seconds.
  """
  def init(self):
    # Feed parameters: at most `last` items per feed, and serve a cached
    # rendering for `ttl` seconds before rebuilding.
    self.last = 20
    self.ttl = 1800
    
  def getDescription(self):
    return "Feed Generator"

  def allowCaching(self):
    # Disable framework-level response caching; this snakelet manages its
    # own cache via ac.cache in serve() below.
    return False
    
  def requiresSession(self):
    # Feed readers don't carry cookies, so no session is needed.
    return self.SESSION_NOT_NEEDED
  
  def serve(self, request, response):
    """
    Build and emit the RSS feed for the namespace named in the URL path.
    """
    request.setEncoding("UTF-8")
    response.setEncoding("UTF-8")
    ac = self.getAppContext()
    s = self.getContext()
    now = time.time()
    # ttl in minutes, for the feed template (RSS <ttl> is in minutes).
    ttl = self.ttl/60
    # Try to use the main site URL to avoid trouble with reverse proxying and port numbers
    try:
      siteurl = ac.siteinfo['siteurl'] 
    except:
      # No configured site URL — fall back to whatever the request says.
      siteurl = request.getBaseURL()
    baseurl = siteurl + ac.base
    # pattern is /feeds/filter
    try:
      (dummy,namespace) = request.getFullQueryArgs().split('/',1)
    except:
      # No '/' in the query args means no namespace was supplied at all.
      response.setResponse(404, "Not Found")
      return
    try:
      # Serve the cached rendering while it is younger than self.ttl seconds.
      if (now - ac.cache.mtime('feeds:' + namespace)) < self.ttl:
        # TODO: add compression
        response.setHeader("Content-Type",'application/rss+xml')
        response.getOutput().write(ac.cache['feeds:' + namespace])
        return
    except:
      # Cache miss (or any cache error) — fall through and rebuild the feed.
      pass
    filter = namespace
    if filter not in ac.namespaces:
      # Unknown namespace: fall back to a feed of the whole wiki.
      # '.+' is deliberately a regex wildcard — filtered() interpolates it
      # verbatim into its match pattern.
      filter = '.+'
      namespace = 'wiki'
    recent = [x for x in ac.indexer.recent if filtered(x,filter,exclusions)]
    items = []
    sitetitle = ac.siteinfo['sitetitle']
    sitedescription = ac.siteinfo['sitedescription']
    
    i = 1
    for pagename in recent:
      try:
        page = ac.store.getRevision(pagename)
      except:
        continue # loop if a page goes missing
      try:
        headers = ac.indexer.pageinfo[pagename]
      except:
        # Page not indexed yet — use the headers stored on the page itself.
        headers = page.headers
      # skip non-indexable pages (no point in adding those to a feed)
      if "x-index" in headers:
        if headers["x-index"].lower() == "no":
          continue
      if "title" in headers:
        title = headers['title']
      else:
        title = pagename
      # Render the page to HTML, then post-process it for feed consumption
      # (absolute URLs, no scripts) via handleMedia().
      soup = BeautifulSoup(renderPage(ac, page, request = False))
      technorati = technoratiTags(headers,soup)
      self.handleMedia(siteurl,soup)
      # The item description carries escaped markup; readers un-escape it.
      description = cgi.escape(unicode(soup)) + cgi.escape(technorati)
      pubdate = httpTime(headers['last-modified'])
      if "x-link" in headers:
        # Link-out pages: the item links to the external URL, but the wiki
        # page URL remains the stable guid.
        link = headers['x-link']
        guid = baseurl + pagename
      else:
        link = guid = baseurl + pagename
      link = cgi.escape(link)
      guid = cgi.escape(guid)
      category = namespace
      author = headers['from']
      # NOTE: the template is filled from locals(), so the local variable
      # names above (title, link, guid, description, pubdate, category,
      # author, ...) are part of the template contract — do not rename them.
      items.append(ac.templates['rss-item'] % locals())
      i = i + 1
      if i > self.last:
        break
    info = ac.siteinfo
    builddate = pubdate = httpTime(time.time())
    if not ac.indexer.done:
      # Index still being built: emit an empty feed and do NOT cache it,
      # so the full feed gets generated once indexing completes.
      items = ''
      buffer = ac.templates['rss-feed'] % locals()
    else:
      items = ''.join(items)
      buffer = ac.templates['rss-feed'] % locals()
      ac.cache['feeds:' + namespace] = buffer
    # TODO: add compression
    response.setHeader("Content-Type",'application/rss+xml')
    response.getOutput().write(buffer)
    
  def handleMedia(self, siteurl, soup):
    """
    Rewrite the parsed page in place so it stands alone inside a feed:
    absolutize wiki links and image sources, and strip all <script> tags.
    """
    # make all wiki and image URLs absolute
    # - this assumes the content went through BaseURI
    links = soup.findAll('a',{'class':re.compile('wiki.*')})
    for link in links:
      link['href'] = siteurl + link['href']
    images = soup.findAll('img')
    for image in images:
      image['src'] = siteurl + image['src']      
    # remove all scripting
    [script.extract() for script in soup.findAll('script')]
    
