'''
Created on Feb 4, 2013

@author: chrisk
'''
import argparse
import pdb
import traceback
import sys
import urllib2
import lxml.etree
import time
import catfeed
import re
import StringIO


def get_argparser():
    """Build and return the command line argument parser for catfeed."""
    description = ('Concatenates Atom and RSS feeds. Version %s'
                   % catfeed.__version__)
    parser = argparse.ArgumentParser(description=description)
    add = parser.add_argument
    add('-f', '--follow', action='store_true', default=False,
        help="Follow the feeds by polling in intervals")
    add('-b', '--backlog', action='store', metavar="N", type=int,
        help="Number of already existing items to include per feed")
    add('-B', '--backlog-total', action='store', metavar="N", type=int,
        help="Number of already existing items to include totally")
    add('-n', '--new-items', action='store', metavar="N", type=int,
        help="Maximum number of new items to include before stopping.")
    add('-p', '--poll-times', action='store', metavar="N", type=int,
        help="Maximum number of times to poll")
    add('-i', '--interval', action='store', metavar="SECONDS", type=int,
        default=600, help="Interval in seconds between polls. Default: %(default)s")
    add('--pdb', action='store_true', default=False,
        help='Puts you in pdb mode if any exceptions are raised.')
    add('-s', '--summary', action='store_true', default=False,
        help='Print summary on exit.')
    add('-o', '--output', action='store', metavar="FILE", default="-",
        help="Output file. Default: %(default)s")
    add('--entry2item', action='store_true', default=False,
        help='Convert Atom entries to RSS items.')
    # All remaining positional arguments are treated as feed URLs.
    add('feed_urls', nargs=argparse.REMAINDER)
    return parser

def main():
    """Command line entry point.

    Parses the arguments, runs the feed handler and, if requested,
    prints a summary once the handler stops.
    """
    parser = get_argparser()
    args = parser.parse_args()
    if len(args.feed_urls) == 0:
        sys.stderr.write("No feed urls set.\n")
        return
    feedhandler = FeedHandler(args.feed_urls, args.backlog, args.backlog_total, args.follow,
                              args.new_items, args.poll_times, args.interval, args.summary,
                              args.output, args.entry2item)
    try:
        feedhandler.run()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop --follow mode: no traceback,
        # fall through so the summary (if requested) is still printed.
        pass
    except Exception:
        sys.stderr.write(traceback.format_exc())
        if args.pdb:
            pdb.post_mortem()
    if args.summary:
        feedhandler.print_summary()


class FeedHandler(object):
    """Fetches one or more Atom/RSS feeds, concatenates their items and
    writes them to an output stream, optionally polling repeatedly.

    The titles seen in the latest fetch of each URL are remembered in
    ``url_last_titles_dict`` so that subsequent polls only emit items that
    were not present before.
    """

    def __init__(self, p_feed_urls, p_backlog_feed, p_backlog_total, p_follow, p_new_items,
                 p_poll_times, p_interval, p_summary, p_output, p_entry2item=False):
        """
        :param p_feed_urls: list of feed URLs to fetch.
        :param p_backlog_feed: max pre-existing items to emit per feed (None = unlimited).
        :param p_backlog_total: max pre-existing items to emit in total (None = unlimited).
        :param p_follow: keep polling in intervals after the backlog pass.
        :param p_new_items: stop after this many new items (None = unlimited).
        :param p_poll_times: stop after this many polls (None = unlimited).
        :param p_interval: seconds between polls.
        :param p_summary: whether a summary will be printed on exit.
        :param p_output: output file path, or "-" for stdout.
        :param p_entry2item: convert Atom <entry> tags to RSS <item> tags.
        """
        self.feed_urls = p_feed_urls
        self.total_count = 0  # items emitted over the whole run (backlog + new)
        self.total_backlog_max = p_backlog_total
        self.backlog_feed_max = p_backlog_feed
        self.url_last_titles_dict = dict()  # url -> titles seen in its last fetch
        self.follow = p_follow
        self.max_new_items = p_new_items
        self.max_poll_times = p_poll_times
        self.interval = p_interval
        self.summary = p_summary
        self.poll_time = None  # scheduled wall-clock time of the current poll
        self.poll_counter = 0
        self.new_items_counter = 0  # items emitted by polling only, not backlog
        self.first_poll_time = None
        self.do_entry2item = p_entry2item
        if p_output == "-":
            self.out = sys.stdout
        else:
            self.out = open(p_output, "a")

    def run(self):
        """Emit the backlog (unless explicitly disabled with 0) and then,
        in follow mode, poll until a limit is reached."""
        # Note: None (unset) limits also enter the backlog pass; only an
        # explicit 0 skips it.
        if self.backlog_feed_max != 0 or self.total_backlog_max != 0:
            self.handle_backlog()
        if self.follow:
            self.handle_polling()

    def handle_backlog(self):
        """Emit already-existing items of every feed, respecting the
        per-feed and total backlog limits."""
        self.handle_sleep_interval()  # Register first fetch to know when next should be
        for url in self.feed_urls:
            self.poll_and_print_new_items(url, p_backlog=True)
            if (self.total_backlog_max is not None and
                    self.total_backlog_max <= self.total_count):
                break
        self.poll_counter += 1

    def handle_polling(self):
        """Repeatedly fetch all feeds, sleeping between rounds, until
        is_finished() says a limit has been reached."""
        while not self.is_finished():
            self.handle_sleep_interval()
            for url in self.feed_urls:
                self.poll_and_print_new_items(url)
            self.poll_counter += 1

    def handle_sleep_interval(self):
        """Sleep until the next scheduled poll time.

        The schedule advances by a fixed interval from the first poll, so
        slow fetches do not accumulate drift.
        """
        if self.poll_time is None:
            self.poll_time = time.time()
            self.first_poll_time = self.poll_time
        else:
            self.poll_time += self.interval
            sleep_time = self.poll_time - time.time()
            if sleep_time > 0:
                time.sleep(sleep_time)

    def is_finished(self):
        """Return True when the poll-count or new-item limit is reached.

        A limit of None means unlimited; an explicit 0 stops immediately.
        """
        if (self.max_poll_times is not None and
                self.max_poll_times <= self.poll_counter):
            return True
        elif (self.max_new_items is not None and
              self.max_new_items <= self.new_items_counter):
            return True
        else:
            return False

    def is_backlog_finished(self, p_item_count):
        """Return True when a backlog limit is reached.

        :param p_item_count: items already emitted for the current feed.
        """
        if ((self.total_backlog_max is not None and
             self.total_backlog_max <= self.total_count) or
                (self.backlog_feed_max is not None and
                 self.backlog_feed_max <= p_item_count)):
            return True
        else:
            return False

    def poll_and_print_new_items(self, p_url, p_backlog=False):
        """Fetch p_url once and write every item not seen in the previous
        fetch of the same URL, oldest first.

        :param p_url: feed URL to fetch.
        :param p_backlog: apply backlog limits instead of poll limits, and
                          do not count the items as "new".
        """
        item_count = 0
        response = urllib2.urlopen(p_url)
        try:
            contentr = StringIO.StringIO(response.read())
        finally:
            response.close()  # don't leak the connection, even if read() fails
        # Crude format sniffing: the Atom namespace URI only occurs in Atom feeds.
        if contentr.getvalue().find('http://www.w3.org/2005/Atom') != -1:
            item_tag = '{http://www.w3.org/2005/Atom}entry'
        else:
            item_tag = 'item'
        parser = lxml.etree.iterparse(contentr, tag=item_tag)
        backlog = []
        # Separate from earlier output with a blank line once anything was written.
        needs_separator = self.total_count != 0
        last_titles = self.url_last_titles_dict.get(p_url, [])
        available_titles = []
        is_broke = False
        for event, element in parser:
            title = self.append_available_title(available_titles, element)
            if ((not p_backlog and self.is_finished()) or
                    (p_backlog and self.is_backlog_finished(item_count))):
                is_broke = True
                break
            if title in last_titles:
                # Everything from here on was already emitted in an earlier poll.
                is_broke = True
                break
            element_str = self.element2str(element)
            backlog.append(element_str)
            item_count += 1
            self.total_count += 1
            if not p_backlog:
                # Backlog items are not "new": keep them out of the --new-items
                # limit and out of the summary's new-item count.
                self.new_items_counter += 1
        if is_broke:
            # We need to keep parsing so that available_titles will include all items from fetch.
            for event, element in parser:
                self.append_available_title(available_titles, element)
        if len(backlog) > 0:
            backlog.reverse()  # feeds list newest first; emit oldest first
            if needs_separator:
                self.out.write("\n")
            self.out.write("\n\n".join(backlog) + "\n")
            self.out.flush()
        if len(last_titles) > 0 and not last_titles[0] in available_titles:
            # We have either a glitch, or the feed has retracted the last post.
            # Or all load balancing nodes have not been updated.
            if len(available_titles) == 0:  # Keep last_titles since obviously something failed.
                available_titles = last_titles
            last_titles_set = set(last_titles)
            available_titles_set = set(available_titles)
            if available_titles_set.isdisjoint(last_titles_set):
                # We have a glitch, nothing to do about it..
                pass
            else:
                # The feed has been edited in some way; if the newest available
                # title was already known, keep the previous state.
                if available_titles[0] in last_titles:
                    available_titles = last_titles
        self.url_last_titles_dict[p_url] = available_titles

    def element2str(self, p_element):
        """Serialize a feed item element, optionally renaming Atom entries
        to RSS items first."""
        if self.do_entry2item:
            p_element.tag = 'item'
            # TODO: further conversions making an Atom Entry into an RSS Item
        return lxml.etree.tostring(p_element).strip()

    @staticmethod
    def append_available_title(p_available_titles, p_element):
        """Extract the item's title text, append it to p_available_titles
        and return it."""
        if p_element.tag == '{http://www.w3.org/2005/Atom}entry':
            title = p_element.xpath('ns:title', namespaces={'ns': 'http://www.w3.org/2005/Atom'})[0].text
        else:
            title = p_element.xpath('title')[0].text
        p_available_titles.append(title)
        return title

    def print_summary(self):
        """Write run statistics and the last seen title per feed to the
        output stream."""
        self.out.write("\n")
        self.out.write("Summary:\n")
        self.out.write("Got totally %s items\n" % self.total_count)
        self.out.write("Got %s items in backlog poll\n" %
                       (self.total_count - self.new_items_counter))
        self.out.write("Polled %s times per feed\n" % self.poll_counter)
        if self.first_poll_time == self.poll_time:
            self.out.write("Poll was made at: %s\n" % self.first_poll_time)
        else:
            self.out.write("First poll was made at: %s and last at: %s\n" %
                           (self.first_poll_time, self.poll_time))
        for url in self.feed_urls:
            # A feed may never have been fetched (early break) or be empty:
            # don't crash the summary on a missing/empty title list.
            titles = self.url_last_titles_dict.get(url)
            last_title = titles[0] if titles else None
            self.out.write("Last item title of %s: %s\n" % (url, last_title))

# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
