#!/usr/bin/python
# Copyright 2007 Alex K (wtwf.com) All rights reserved.
# $Id: expandRss.py,v 1.35 2009-01-07 21:27:56 ark Exp $

"""expandRss takes an rss feed and expands it to contain full posts

-d
  debug first feed
-s
  only parse a single entry
-f
  force execution
-o filename
  only run the job that produces this filename
"""

# latest version available http://wtwf.com/scripts/bin/expandRss
# documented somewhere around http://wtwf.com/scripts/

# TODO(ark) this http://www.osxfaq.com/rss/osxfaq.xml

import sys
import os
import httplib
import urllib
import urlparse
import re
import traceback
import logging
import datetime
import time
import getopt

from xml.dom import minidom
import xml.sax.saxutils

if sys.version_info < (2, 5):
  # was (2, 4), which contradicted the message below; the message says 2.5
  raise EnvironmentError('You probably need python 2.5 or greater')

######################################################################
# directory the expanded feed files get written into
outputDir = os.path.expanduser('~/html/wtwf/rss')

# set by -s/--single_item: only expand the first entry of each feed
debug_single = False

# set this in your expandrss.py to use an image proxy
image_proxy_url = None

def usage(code, msg=''):
  """Print the module docstring (plus an optional message) and exit.

  Errors (code != 0) go to stderr; plain help goes to stdout.
  """
  if code:
    fd = sys.stderr
  else:
    fd = sys.stdout
  # PROGRAM is made available for %(...)s substitution in __doc__
  # (the current docstring has no substitutions, so it is unused)
  PROGRAM = os.path.basename(sys.argv[0])
  print >> fd, __doc__ % locals()
  if msg:
    print >> fd, msg
  sys.exit(code)

def SetDescription(item, xmldoc, txt):
  """Replace the body of item's description with txt.

  Looks for an existing <description>, then <content>, then <summary>
  element (marking the latter two as type="html"); if none exists a
  new <description> element is created and appended.
  """
  desc = None
  for tag in ('description', 'content', 'summary'):
    found = item.getElementsByTagName(tag)
    if found:
      desc = found[0]
      if tag == 'description':
        logging.debug('Using existing description')
      elif tag == 'content':
        logging.debug('Using content')
        desc.setAttribute('type', 'html')
      else:
        logging.debug('Using summary')
        desc.setAttribute('type', 'html')
      # drop whatever body the element already had
      while desc.firstChild:
        desc.removeChild(desc.firstChild)
      break
  if desc is None:
    # o.k. no description, no content, no summary: make a description
    logging.debug('making new description')
    desc = xmldoc.createElement('description')
    item.appendChild(desc)
  desc.appendChild(xmldoc.createTextNode(txt))

def ResolveSnippyUrl(url):
  """Follow one redirect for a link-shortener url.

  Returns the Location header's target if the server sends one,
  otherwise the original url.  Network failures fall back to the
  original url rather than raising.

  NOTE(review): only urlp.path is requested -- any query string is
  dropped; confirm shortener urls never need one.
  """
  conn = None
  try:
    urlp = urlparse.urlparse(url)
    conn = httplib.HTTPConnection(urlp.hostname)
    conn.request('GET', urlp.path)
    r1 = conn.getresponse()
    nurl = r1.getheader('location')
    if nurl:
      return nurl
  except Exception:
    # best effort: an unreachable shortener just means we keep the url
    logging.debug('ResolveSnippyUrl failed for %s', url)
  finally:
    # the original leaked the connection
    if conn:
      conn.close()
  return url

def GetDescription(item):
  """Return the text body of the first content-bearing child tag.

  Tries content, content:encoded, description, then summary.  Tags
  that are present but empty are skipped (the original crashed with
  AttributeError on items[0].firstChild being None).

  Raises:
    LookupError: if no tag with text content is found.
  """
  tags = ['content', 'content:encoded', 'description', 'summary']

  for tag in tags:
    items = item.getElementsByTagName(tag)
    if items and items[0].firstChild is not None:
      logging.debug('GetDescription: found in %s tag', tag)
      return items[0].firstChild.data
  raise LookupError('Unable to find Description')


def SetLink(item, xmldoc, txt):
  """Sets the item's <link> to a new url.

  Also records the previous url in a new <origlink> element so callers
  can still tell whether an item is new.  If the item carries a
  permalink <guid>, it is updated to txt as well.
  """

  l = item.getElementsByTagName('link')[0].firstChild
  orig = xmldoc.createElement('origlink')
  orig.appendChild(xmldoc.createTextNode(l.data))
  item.appendChild(orig)
  l.data = txt

  # now set guid if it's a permalink
  try:
    guid = item.getElementsByTagName('guid')[0]
    if (guid.getAttribute('isPermaLink') == 'true'):
      guid.firstChild.data = txt
  except IndexError:
    # no guid element on this item -- nothing to update.  (This was
    # 'finally: pass', which never suppressed the IndexError and so
    # crashed on guid-less items.)
    pass


def GetLinkFromItem(item, url_replace=None):
  """Extract the target url from an item's <link> elements.

  Prefers a link with text content; falls back to the href of a
  rel="alternate" link (atom style).  url_replace is an optional list
  of (pattern, repl) regex pairs applied to the result.  The value is
  passed through str(), so a missing link comes back as 'None'.
  """
  found = None
  for node in item.getElementsByTagName('link'):
    child = node.firstChild
    if child and child.data:
      found = child.data
      break
    if node.getAttribute('rel') == 'alternate':
      found = node.getAttribute('href')
      break
  if found and url_replace:
    for pattern, repl in url_replace:
      found = re.sub(pattern, repl, found)
  return str(found)

# matches a src= attribute: the prefix, the url itself, and the
# terminating quote/space/bracket
SRC_RE = re.compile(r"""(?P<pre>\ssrc=["']?)(?P<src>[^ "'>]+)(?P<post>[ "'>])""", re.IGNORECASE)

def FixRelativeUrls(base, txt):
  """Replace all relative urls in txt with absolute urls resolved
  against base."""
  def _absolutize(match):
    absolute = urlparse.urljoin(base, match.group('src'))
    return match.group('pre') + absolute + match.group('post')
  return SRC_RE.sub(_absolutize, txt)

def FixSingleSrc_(match):
  """re.sub callback: prefix an image src with image_proxy_url.

  Srcs whose extension is not an image type are returned unchanged.
  """
  global image_proxy_url
  if not image_proxy_url:
    # NOTE(review): stdlib logging.fatal only logs -- execution
    # continues and the concatenation below will fail on None
    logging.fatal('image_proxy_url must be set in .expandrss.py')
  src = match.group('src')
  extension = os.path.splitext(src)[1]
  if extension.lower() not in ('.gif', '.png', '.jpg', '.jpeg'):
    return match.group(0)
  return match.group('pre') + image_proxy_url + src + match.group('post')


def AddProxyToUrls(txt):
  """Add a proxy to all image srcs.

  Runs FixSingleSrc_ over every src="..." attribute in txt; requires
  image_proxy_url to be configured.
  """
  return SRC_RE.sub(FixSingleSrc_, txt)


class feedExpander:
  """Generic feed enhancer.

  Fetches the page behind each feed item and replaces the item's
  description with the text found between begin_str and end_str.
  The markers may be plain strings, %-templates (expanded against
  enhance()'s locals, e.g. %(link)s or %(anchor)s) or pre-compiled
  regular expressions.
  """

  def __init__(self, url, destination,
               begin_str, end_str,
               include_begin=0,
               include_end=0,
               replace=None,
               feedreplace=None,
               itemreplace=None,
               fetchcommand=None,
               encoding=None,
               interval=None,
               url_replace=None,
               absolute_urls=False,
               absolute_base=None,
               proxy_images=False,
               item_limit=None,  # only enhance this number of items
               ):
    """Args:
      url: feed url to fetch.
      destination: output file name (relative to outputDir).
      begin_str, end_str: markers bracketing the post body.
      include_begin, include_end: keep the marker lines themselves.
      replace: [(regex, repl)] applied to every fetched page line.
      feedreplace: [(regex, repl)] applied to the raw feed xml.
      itemreplace: [(regex, repl)] applied to the extracted text.
      fetchcommand: shell command with %s for the url (instead of urllib).
      encoding: codec used to decode fetched page lines.
      interval: datetime.timedelta; skip refetching recent output files.
      url_replace: [(regex, repl)] applied to each item's link.
      absolute_urls: make relative img srcs absolute.
      absolute_base: base url for absolute_urls (default: the item link).
      proxy_images: route image srcs through image_proxy_url.
      item_limit: only enhance this number of items.
    """
    self.url = url
    self.destination = destination
    self.begin_str = begin_str
    self.end_str = end_str
    self.include_begin = include_begin
    self.include_end = include_end
    self.fetchcommand = fetchcommand
    self.replace = replace or []
    self.feedreplace = feedreplace or []
    self.itemreplace = itemreplace or []
    self.encoding = encoding

    # proxying images requires absolute urls first
    self.absolute_urls = absolute_urls or proxy_images
    self.absolute_base = absolute_base
    self.proxy_images = proxy_images

    # pre-compile the markers unless they are per-item %-templates
    self.global_begin_re = None
    self.global_end_re = None

    if self.begin_str:
      if hasattr(self.begin_str, 'search'):
        # already a compiled regex
        self.global_begin_re = self.begin_str
        self.begin_str = 'REGEXP: ' + self.begin_str.pattern
      elif self.begin_str.find('%') == -1:
        self.global_begin_re = re.compile(self.begin_str)

    if self.end_str:
      if hasattr(self.end_str, 'search'):
        self.global_end_re = self.end_str
        self.end_str = 'REGEXP: ' + self.end_str.pattern
      elif self.end_str.find('%') == -1:
        self.global_end_re = re.compile(self.end_str)

    self.interval = interval
    self.url_replace = url_replace or None
    self.item_limit = item_limit


  def enhance(self, item, xmldoc):
    """Replace item's description with the full text scraped from its
    link (or the feed's own text when begin_str is unset)."""
    # item_limit counts down; 0 means the quota is used up
    if self.item_limit == 0:  # bug fix: was 'is 0' (identity, not equality)
      logging.debug("Reached item_limit no longer enhancing")
      return
    if self.item_limit:
      self.item_limit -= 1
    link = GetLinkFromItem(item, self.url_replace)
    txt = ''
    go = 0
    logging.info('Opening: %s', link)
    # expose the url fragment to %(anchor)s marker templates
    idx = link.find('#')
    anchor = ''
    # bug fix: was 'idx != 0', which set anchor to the whole link
    # whenever there was no '#' at all (find returns -1)
    if idx != -1:
      anchor = link[idx+1:]
    logging.debug('Anchor is %s', anchor)
    if self.begin_str:
      logging.debug('start is %s', self.begin_str % locals())

      if self.fetchcommand:
        urlh = os.popen(self.fetchcommand % link)
      else:
        urlh = urllib.urlopen(link)
      lines = urlh.readlines()
      urlh.close()

      # some pages come back as one line with \r separators
      if len(lines) == 1:
        lines = lines[0].split('\r')
        logging.debug('There are NOW %d lines', len(lines))

      for line in lines:
        line = DoReplace(line, self.replace)
        if self.encoding:
          logging.debug('decoding using: %s', self.encoding)
          line = line.decode(self.encoding, 'ignore')
        if self.global_begin_re:
          begin_re = self.global_begin_re
        else:
          # per-item %-template: compile against this item's locals
          begin_re = re.compile(self.begin_str % locals())
        if begin_re.search(line):
          logging.debug('Found Start: %s IN %r', self.begin_str, line)
          if self.include_begin:
            txt += line
          go = 1
          continue
        if self.global_end_re:
          end_re = self.global_end_re
        else:
          end_re = re.compile(self.end_str % locals())
        if txt and end_re.search(line):
          logging.debug('Found End: %s IN %r', self.end_str, line)
          if self.include_end:
            txt += line
          break
        if go:
          txt += line
    else:
      # no markers configured: fall back to the feed's own text
      txt = GetDescription(item)

    if self.itemreplace:
      logging.debug('Doing Item replace')
      txt = DoReplace(txt, self.itemreplace)

    if self.absolute_urls:
      base = link
      if self.absolute_base:
        base = self.absolute_base
      else:
        # resolve link shorteners so relative urls land on the real host
        urlp = urlparse.urlparse(base)
        if urlp.hostname in ('bit.ly', 'www.bit.ly', 'feedproxy.google.com'):
          base = ResolveSnippyUrl(base)
      logging.debug('Making urls absolute from: %s', base)
      txt = FixRelativeUrls(base, txt)

    if self.proxy_images:
      logging.debug('Adding Proxy to images')
      txt = AddProxyToUrls(txt)

    if (txt):
      # NOTE(review): txt may already be unicode here (when encoding=
      # was used); in python 2 unicode.decode re-encodes via ascii
      # first -- confirm feeds that set encoding= still round-trip
      SetDescription(item, xmldoc, txt.decode('utf-8', 'ignore'))
    else:
      logging.error('Error: no text found')

def Match(line, pattern, variables):
  """Return a truthy value if line matches pattern.

  pattern may be a compiled regex (anything with a .search method) or
  a plain %-template string expanded against variables and searched
  for as a substring.

  (Parameters were renamed from str/vars, which shadowed builtins;
  the function is only ever called positionally in this file.)
  """
  if hasattr(pattern, 'search'):
    # assume it's a regex
    return pattern.search(line)
  else:
    return line.find(pattern % variables) != -1

######################################################################

class AppURLopener(urllib.FancyURLopener):
    # masquerade as Firefox (presumably to avoid servers that block
    # the default "Python-urllib" user agent)
    version = 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.7.10) Gecko/20050716 Firefox/1.0.6'

# install the custom opener for all urllib.urlopen calls in this module
urllib._urlopener = AppURLopener()

class b3taExpander:
  """Turn the single-item b3ta newsletter feed into one item per section.

  enhance() fetches the newsletter page behind the feed item, splits
  it into sections, and returns a list of new <item> elements which
  the caller (ProcessFeed) splices into the feed.
  """

  def __init__(self, destination):
    self.url = 'http://b3ta.com/xml/newsletter.php?rdf=1'
    self.destination = destination
    self.doneItems = 0
    # matches href=... with double, single or no quotes
    self.hrefre = re.compile("""href=(("http[^"]*")|('http[^']*')|(http[^"' ]*))""") #"
    # strips html tags and entities (used to clean section titles)
    self.striptags = re.compile(' ?(<[^>]*>)|(&[^;]*;) ?')
    self.interval = datetime.timedelta(days=1)

  def enhance(self, item, xmldoc):
    """Fetch the newsletter behind item's link; return a list of new
    items, one per section found (or None when single-item debugging
    has already produced one)."""
    if debug_single and self.doneItems > 0:
      return

    items = []

    # hit b3ta.com and get the newsletter
    link = item.getElementsByTagName('link')[0].firstChild.data
    logging.info('Opening %s', link)
    urlh = urllib.urlopen(link)
    state = 0

    sections = []
    txt = ''
    tit = ''
    # state 0: before the newsletter body; state 1: inside it.
    # NOTE(review): state only advances on a line containing
    # '<a name="line' and the loop breaks on the first line without
    # it, so this relies on the newsletter markup carrying that
    # anchor on every body line -- confirm against the live page
    # before changing.
    for line in urlh.readlines():
      if line.find('<a name="line') != -1:
        if state == 0:
          state = 1
      else:
        if state == 1:
          break
      if state == 1:
        if not tit:
          # first reasonably long non-ruler line becomes the title
          if len(line) > 22 and line.find('------------') == -1:
            tit = line.decode('ISO-8859-1')
        txt += line.decode('ISO-8859-1') + '<br />'
        # a section ends at its outgoing link
        if line.find('href="htt') != -1:
          sections.append((tit, txt))
          txt = ''
          tit = ''

    urlh.close()

    if txt:
      # trailing section that never hit a closing link
      sections.append((tit, txt))

    logging.info('We found %d sections', len(sections))

    # build one new <item> per section
    for (tit, txt) in sections:
      i = xmldoc.createElement('item')
      title = xmldoc.createElement('title')
      description = xmldoc.createElement('description')

      tit = self.striptags.sub('', tit)

      title.appendChild(xmldoc.createTextNode(tit))

      # use the section's first href as the item link
      mobj = self.hrefre.search(txt)
      if mobj:
        logging.info('link: %s', mobj.group(1))
        link = xmldoc.createElement('link')
        link.appendChild(xmldoc.createTextNode(mobj.group(1).strip("\"' ")))
        i.appendChild(link)
      # txt = unicode(txt, 'utf-8')
      description.appendChild(xmldoc.createTextNode(txt))
      i.appendChild(title)
      i.appendChild(description)
      items.append(i)

    self.doneItems += 1
    return items


############################################################

class farkIt:
  """Rewrite fark.com feed links.

  Ordinary items get pointed at the linked article
  (http://www.fark.com/cgi/go.pl?IDLink=...) instead of the comments
  page (http://www.fark.com/cgi/comments.pl?IDLink=...), with extra
  links added to the description.  Photoshop/Caption contest items
  keep pointing at fark, but at the voting results page.
  """

  def __init__(self, destination):
    self.url = 'http://www.fark.com/fark.rss'
    self.destination = destination
    self.done = 0
    # contest items are handled differently (see enhance)
    self.dontdo = re.compile(r'\[(Photoshop|Caption)\]', re.IGNORECASE)

  def enhance(self, item, xmldoc):
    # skip everything once done is set (it is never set here, so in
    # practice every item is processed)
    if self.done:
      return

    story_link = item.getElementsByTagName('link')[0].firstChild.data
    story_title = item.getElementsByTagName('title')[0].firstChild.data
    story_id = story_link.rsplit('/', 1)[1]
    article_link = 'http://www.fark.com/cgi/go.pl?i=' + story_id
    if self.dontdo.search(story_title):
      # photoshop/caption contest: keep a way to reach the original pic
      desc_text = item.getElementsByTagName('description')[0].firstChild.data
      SetDescription(item, xmldoc,
                     desc_text + '<p><a href="%s">View Source</a></p>' % article_link)

      # for photoshop contests we want to link to the voting results
      SetLink(item, xmldoc,
              'http://www.fark.com/comments/' +
              story_id + '?tt=voteresults0')
    else:
      SetLink(item, xmldoc, article_link)

      # now add a link to view comments in the description
      desc_text = item.getElementsByTagName('description')[0].firstChild.data
      SetDescription(item, xmldoc,
                     desc_text +
                     ('<p><a href="%s">View Comments</a> ' % story_link) +
                     ('<a href="%s">View Item</a></p>' % article_link))

    story_title = xml.sax.saxutils.unescape(story_title, {'&quot;': '"'})
    item.getElementsByTagName('title')[0].firstChild.data = story_title

class pbfIt:
  """Perry Bible Fellowship: inline each comic image.

  Links look like http://www.pbfcomics.com?cid=PBF223-Box_of_Hate.jpg#208;
  the image itself lives under /archive_b/, so the description is
  replaced with an <img> for it (served via the image proxy).
  """

  def __init__(self, destination):
    self.url = 'http://pbfcomics.com/feed/feed.xml'
    self.destination = destination
    self.interval = datetime.timedelta(days=1)

  def enhance(self, item, xmldoc):
    global image_proxy_url

    page_link = item.getElementsByTagName('link')[0].firstChild.data

    # the comic file name sits between 'cid=' and an optional '#'
    cid_pos = page_link.find('cid=')
    hash_pos = page_link.find('#', cid_pos)
    if hash_pos == -1:
      comic = page_link[cid_pos + 4:]
    else:
      comic = page_link[cid_pos + 4:hash_pos]
    txt = ('<img src="%shttp://www.pbfcomics.com/archive_b/%s" alt=""/>' %
           (image_proxy_url, comic))
    SetDescription(item, xmldoc, txt)

class btafIt:
  """Bob The Angry Flower: inline the comic image.

  The feed links at http://angryflower.com/NAME.html; the image
  itself is http://angryflower.com/NAME.gif, so swap the extension
  and inline it as the description.
  """

  def __init__(self, destination):
    self.url = 'http://interglacial.com/rss/bob_the_angry_flower.rss'
    self.destination = destination
    self.interval = datetime.timedelta(days=1)

  def enhance(self, item, xmldoc):
    page_url = item.getElementsByTagName('link')[0].firstChild.data
    image_url = page_url.replace('.html', '.gif')
    SetDescription(item, xmldoc, '<img src="%s" alt=""/>' % image_url)

def GetLink(x, name=None):
  """Return the text of x's <origlink> (preferred) or <link> element.

  With an explicit name, returns that element's text or None.  With
  no name, tries 'origlink' first (written by SetLink) and falls back
  to 'link'.  Items with neither get logged at debug level.
  """
  if not name:
    ans = GetLink(x, name='origlink')
    if ans:
      return ans
    name = 'link'

  nodes = None
  if x:
    nodes = x.getElementsByTagName(name)
  if nodes and nodes[0].firstChild and nodes[0].firstChild.data:
    return nodes[0].firstChild.data
  if name != 'origlink':
    if x.firstChild and x.firstChild.firstChild:
      logging.debug('Unknown item! : %s', x.firstChild.firstChild.data)
    else:
      logging.debug('Unknown item! (no fcfc): %s', x.data)
  return None


def Main(args):
  """Parse command line flags, load the user config, process all feeds.

  NOTE(review): the args parameter is ignored; flags are re-parsed
  from sys.argv below.
  """
  global debug_single
  force = False

  logging.basicConfig()
  logging.getLogger().setLevel(logging.INFO)

  try:
    opts, args = getopt.getopt(sys.argv[1:], 'hdsfc:o:',
                               ['help', 'debug', 'single_item', 'force',
                                'config=', 'only='])
  except getopt.error, msg:
    usage(1, msg)

  if args:
    # no positional arguments are accepted
    usage(1)

  userfile = None
  only = None
  for opt, arg in opts:
    if opt in ('-h', '--help'):
      usage(0)
    if opt in ('-d', '--debug'):
      # log once at each level so the switch is visible either way
      logging.info('Setting logging level to debug')
      logging.getLogger().setLevel(logging.DEBUG)
      logging.debug('Setting logging level to debug')
    if opt in ('-s', '--single_item'):
      debug_single = True
    if opt in ('-f', '--force'):
      force = True
    if opt in ('-c', '--config'):
      userfile = arg
    if opt in ('-o', '--only'):
      only = arg


  # possibly load something extra...
  if userfile is None:
    userfile = os.path.expanduser('~/.expandRssrc.py')
  if os.path.exists(userfile):
    logging.debug('Loading: %s', userfile)
    # NOTE(review): execfile runs the config inside Main's scope;
    # a config that rebinds feeds (rather than mutating the
    # module-level list) may not be seen by the loop below -- confirm
    # how real configs are written before changing this.
    execfile(userfile)

  logging.debug('There are %d feeds', len(feeds))

  for fs in feeds:
    if fs:
      try:
        # -o limits the run to feeds whose destination contains `only`
        if only is None or only in fs.destination:
          ProcessFeed(fs, force)
      except:
        # keep going: one broken feed shouldn't stop the rest
        logging.info('\n\nError with: %s\n', fs.url)
        traceback.print_exc()

def DoReplace(orig_str, arr):
  """Apply each (pattern, replacement) regex pair in arr to orig_str.

  arr may be None or empty, in which case orig_str is returned
  unchanged.
  """
  for src, dst in (arr or []):
    logging.debug('Doing replace: %r %r', src, dst)
    logging.debug('on %r', orig_str)
    orig_str = re.sub(src, dst, orig_str)
  return orig_str

def ProcessFeed(fs, force):
  """Fetch, enhance and write one feed.

  fs must provide url, destination and enhance(item, xmldoc); it may
  also provide interval (a datetime.timedelta throttling refetches)
  and feedreplace (regex pairs applied to the raw feed xml).  force
  skips both the interval check and the anything-new check.
  """
  destination = os.path.join(outputDir, fs.destination)

  # Perhaps we don't need to process the feed if the interval hasn't passed
  if (not force and
      os.path.exists(destination) and
      os.path.getsize(destination) > 0 and
      hasattr(fs, 'interval') and fs.interval):
    now = datetime.datetime.now()
    if_before = now - fs.interval
    now = time.mktime(now.timetuple())
    if_before = time.mktime(if_before.timetuple())
    # allow 10% slack so a job scheduled exactly at the interval still runs
    if_before = now - ((now - if_before) * 1.1)
    then = os.path.getmtime(destination)
    fname = os.path.basename(destination)
    if then < if_before:
      logging.info('We should do %s: %s < %s',
                   fname, time.ctime(then), time.ctime(if_before))
    else:
      logging.info('feed %s is not old enough: %s > %s',
                   fname, time.ctime(then), time.ctime(if_before))
      return

  # get and parse the feed
  body = urllib.urlopen(fs.url).read()
  if hasattr(fs, 'feedreplace'):
    body = DoReplace(body, fs.feedreplace)

  xmldoc = minidom.parseString(body)

  # see if we need to change anything
  if (not force and
      os.path.exists(destination) and os.path.getsize(destination) > 0):
    logging.debug('Looking in current feed for: %s', fs.destination)
    # now read in the previously-written file
    fh = open(destination, 'r')
    try:
      origxml = minidom.parseString(fh.read(-1))
    finally:
      fh.close()
    origlinks = [GetLink(x) for x in origxml.getElementsByTagName('item')]

    for item in xmldoc.getElementsByTagName('item'):
      if not GetLink(item) in origlinks:
        logging.info('We found something new for: %s', fs.url)
        break
    else:
      # nothing new: just touch the file so the interval check resets
      logging.debug('Nothing new found in current feed')
      now = time.time()
      os.utime(destination, (now, now))
      return

  logging.info('Finding doc contents for: %s', fs.url)
  # now replace the descriptions; atom feeds use <entry> not <item>
  all_items = xmldoc.getElementsByTagName('item')
  if not all_items:
    all_items = xmldoc.getElementsByTagName('entry')
  for item in all_items:
    try:
      items = fs.enhance(item, xmldoc)
      if items:
        # the enhancer produced extra items (e.g. b3taExpander):
        # splice them in directly after the current one
        items.reverse()
        for x in items:
          if item.nextSibling:
            item.parentNode.insertBefore(x, item.nextSibling)
          else:
            item.parentNode.appendChild(x)
      if debug_single:
        logging.info('Only expanding one item since debug is set')
        break
    except Exception:
      # best effort: a broken item shouldn't kill the whole feed, but
      # surface (and re-raise) the error when debugging.  was a bare
      # 'except:', which also swallowed KeyboardInterrupt/SystemExit.
      if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug('\n\nError with: %s\n', fs.url)
        traceback.print_exc()
        raise

  logging.info('Writing doc: %s', destination)
  fh = open(destination, 'w')
  try:
    fh.write(xmldoc.toxml(encoding='utf-8'))
  finally:
    fh.close()

######################################################################

# the feeds
feeds = [
  # each entry exposes url, destination and enhance(item, xmldoc);
  # feedExpander entries give (url, destination, begin_str, end_str)
  feedExpander('http://feeds.feedburner.com/freakonomicsblog',
               'freakonomics.xml',
               '<div class="blog_post',
               '</div><!-- end blog-post lead -->',
               interval=datetime.timedelta(hours=1),
               ),
  feedExpander('http://www.joyoftech.com/joyoftech/jotblog/atom.xml',
              'jot.xml',
               '../joyimages/',
               '</font>',
               include_begin=True,
               feedreplace=[('\.\./joyimag',
                             'http://www.geekculture.com/joyoftech/joyimag'),
                            ]
               ),
  farkIt('fark.xml'),
  feedExpander('http://bbspot.com/rdf/bbspot.rdf',
               'bbspot.xml',
               '<!--STORY BEGIN-->',
               '<!--STORY END-->',
               ),
  feedExpander('http://unclebob.diaryland.com/index.rss',
               'unclebob.xml',
               '<!--DIARY ENTRY-->',
               '<!--PREV/NEXT LINKS-->',
               itemreplace=[('FONT FACE', 'NOFONT NOFACE'),
                            ]
               ),
  # penny-arcade hides the real link in feedburner:origLink, so the
  # feedreplace pairs swap the tags around before parsing
  feedExpander('http://www.penny-arcade.com/rss.xml',
               'pennyarcade.xml',
               '"simplecontent"',
               '"buttons"',
               feedreplace =[('<description>', '<description><![CDATA['),
                             ('</description>', ']]></description>'),
                             ('<link', '<someoldlink'),
                             ('</link', '</someoldlink'),
                             ('<feedburner:origLink', '<link'),
                             ('</feedburner:origLink', '</link'),
                             ],
               itemreplace=[('src="/images',
                             'src="http://www.penny-arcade.com/images')],
               fetchcommand="wget '%s' -O -",
               interval=datetime.timedelta(days=1),
               ),
  feedExpander('http://www.jesusandmo.net/feed/atom/',
               'jesusandmo.xml',
               'div id="comic"',
               'div class="post-frontpage"',
               interval=datetime.timedelta(days=1),
               ),
  feedExpander('http://blog.guykawasaki.com/atom.xml',
               'guykawasaki.xml',
               '<div class="entry"',
               '<p class="entry-footer">',
               interval=datetime.timedelta(days=1),
               ),
  b3taExpander('b3ta.xml'),
  btafIt('bobtheangryflower.xml'),
  pbfIt('pbf.xml'),
  ]

# command line entry point
if __name__ == '__main__':
  Main(sys.argv)
