#!/usr/bin/python
# Copyright 2011 Alex K (wtwf.com) All rights reserved.
# $Id:  $

"""downloadfeed - downloads all the items from a feed from google reader.

Makes a bunch of files for each feed
http://googleblog.blogspot.com/atom.xml turns into
googleblog.blogspot.com_atom.001.xml
googleblog.blogspot.com_atom.002.xml
...
googleblog.blogspot.com_atom.143.xml

-u url
--url url
  download the feed at this url
"""

import sys
import os
import urllib
import urllib2
import re
import logging
import getopt

# The error message promises "2.5 or greater", so guard against (2, 5);
# the original tested (2, 4), letting Python 2.4 slip through.
if sys.version_info < (2, 5):
  raise EnvironmentError('You probably need python 2.5 or greater')

# Matches Google Reader's <gr:continuation> token, which marks that the
# feed has another page; group(1) is the token itself.
CONTINUATION_RE = re.compile('<gr:continuation>([^<]*)</gr:continuation>',
                             re.MULTILINE)


def Usage(code, msg=''):
  """Print the module docstring (plus an optional message) and exit.

  Errors (code != 0) go to stderr; plain help output goes to stdout.
  """
  stream = sys.stderr if code else sys.stdout
  # PROGRAM stays in locals() so the docstring's % expansion can use it.
  PROGRAM = os.path.basename(sys.argv[0])
  stream.write('%s\n' % (__doc__ % locals()))
  if msg:
    stream.write('%s\n' % (msg,))
  sys.exit(code)



def Main(args):
  """Parse command-line flags and kick off the feed download.

  Args:
    args: argv-style list; args[0] is the program name and is skipped.
          (The original ignored this parameter and re-read sys.argv.)

  Exits via Usage() on bad flags, a missing url, or leftover arguments.
  """
  logging.basicConfig()
  logging.getLogger().setLevel(logging.INFO)

  try:
    # 'u:' (not 'u'): -u takes the url as its argument, mirroring --url=.
    opts, args = getopt.getopt(args[1:], 'hdu:',
                               ['help', 'debug', 'url='])
  except getopt.error, msg:
    Usage(1, msg)

  url = None
  for opt, arg in opts:
    if opt in ('-h', '--help'):
      Usage(0)
    if opt in ('-d', '--debug'):
      logging.info('Setting logging level to debug')
      logging.getLogger().setLevel(logging.DEBUG)
      logging.debug('Set logging level to debug')
    if opt in ('-u', '--url'):
      url = arg
  # Fall back to the first positional argument when no -u/--url was given.
  if not url and args:
    url = args[0]
    del args[0]

  # Any leftover positionals, or no url at all, is a usage error.
  if args or not url:
    Usage(1)

  logging.info('Feed URL: %s', url)
  FetchAllForFeed(url)
def FetchAllForFeed(url):
  """Derive an output filename stem from url and fetch every feed page.

  e.g. http://googleblog.blogspot.com/atom.xml becomes the stem
  googleblog.blogspot.com_atom (FetchPartOfFeed appends .NNN.xml).

  Args:
    url: the feed url to download.
  """
  slashes = url.find('//')
  if slashes >= 0:
    # Strip the scheme prefix; '>= 0' also handles a protocol-relative
    # url that starts with '//' (the original's '> 0' missed that case).
    filename = url[slashes + 2:]
  else:
    # No scheme separator at all: use the url as-is. The original left
    # filename undefined here and raised NameError on the next line.
    filename = url
  filename = filename.replace('/', '_')
  if filename.lower().endswith('.xml'):
    filename = filename[0:-4]
  logging.info('Filename: %s', filename)
  FetchPartOfFeed(url, filename)

def FetchPartOfFeed(url, filename, file_num=1, continuation=None):
  """Fetch feed pages via Google Reader, writing one file per page.

  Writes '<filename>.<NNN>.xml' for each page and follows the feed's
  <gr:continuation> tokens until the feed is exhausted.

  Args:
    url: the original feed url (quoted into the Reader API request).
    filename: output filename stem produced by FetchAllForFeed.
    file_num: number of the first output file; counts up from here.
    continuation: optional Reader continuation token to resume from.
  """
  # Iterate instead of recursing: a feed with ~1000 pages would
  # otherwise blow Python's default recursion limit.
  while True:
    if continuation:
      query = '?c=' + urllib.quote(continuation)
    else:
      query = ''

    url_in = urllib2.urlopen('http://www.google.com/reader/public/atom/feed/' +
                             urllib.quote(url) + query)
    try:
      feed_contents = url_in.read()
    finally:
      url_in.close()  # the original leaked the connection

    file_out = open('%s.%03d.xml' % (filename, file_num), 'w')
    try:
      file_out.write(feed_contents)
    finally:
      file_out.close()

    match = CONTINUATION_RE.search(feed_contents)
    if not match:
      break
    # There is more to this feed so go get the next bit!
    continuation = match.group(1)
    logging.info('next feed is at: %s', continuation)
    file_num += 1


# Script entry point: pass the full argv through so Main can parse flags.
if __name__ == '__main__':
  Main(sys.argv)
