#!/usr/bin/env python

#   
#     newsbiff - A newsgroups monitoring utility
#
#  Copyright (c) 2007, Alex Marandon
#  All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are met:
#  
#    * Redistributions of source code must retain the above copyright notice,
#      this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#    * Neither the name of newsbiff nor the names of its contributors
#      may be used to endorse or promote products derived from this software 
#      without specific prior written permission.
#  
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
#  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#  POSSIBILITY OF SUCH DAMAGE.

import ConfigParser, sys, os, time, datetime, string, re
from nntplib import NNTP
from newsrc import Newsrc
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
from optparse import OptionParser
from os.path import expanduser, exists

import elementtree.ElementTree as ET

# Override ElementTree's tag serialization so that elements in a known
# namespace are written WITHOUT a prefix (some Atom clients break on
# prefixed element names); the namespace is declared through a default
# "xmlns" attribute instead.
def fixtag(tag, namespaces):
    """Split a decorated tag of the form {uri}local.

    Returns (local_name, xmlns) where xmlns is an ("xmlns", uri)
    attribute pair for a namespace seen for the first time, or None when
    the namespace was already declared or is the reserved "xml" one.
    `namespaces` maps already-seen URIs to prefixes and is updated in
    place.
    """
    if isinstance(tag, ET.QName):
        tag = tag.text
    # The deprecated string.split() function (gone in Python 3) is
    # replaced by the equivalent str method.
    namespace_uri, tag = tag[1:].split("}", 1)
    prefix = namespaces.get(namespace_uri)
    if prefix is None:
        prefix = ET._namespace_map.get(namespace_uri)
        if prefix is None:
            prefix = "ns%d" % len(namespaces)
        namespaces[namespace_uri] = prefix
        if prefix == "xml":
            xmlns = None
        else:
            xmlns = ("xmlns", namespace_uri)
    else:
        xmlns = None
    return tag, xmlns

# Monkey-patch the module-level hook used by the ElementTree 1.2 writer.
setattr(ET, 'fixtag', fixtag)

# When True, progress messages are written to standard output.
DEBUG = False

def camelize(text):
    """Convert snake_case to CamelCase, e.g. 'thread_size' -> 'ThreadSize'.

    (Parameter renamed from `string`, which shadowed the stdlib module
    of the same name used elsewhere in this file.)
    """
    return ''.join(word.capitalize() for word in text.split('_'))

def perror(msg):
    """Write an error message to stderr, newline-terminated.

    (Parameter renamed from `string`, which shadowed the stdlib module.)
    """
    sys.stderr.write(msg + "\n")

class ConfigError(Exception):
    """Raised when the configuration file is missing a required option.

    Derives from Exception (the original was a plain old-style class,
    which defeats `except Exception` handlers and standard traceback
    formatting).  The `msg` attribute is preserved for existing callers.
    """
    def __init__(self, msg = ''):
        self.msg = "Configuration error: " + msg
        Exception.__init__(self, self.msg)

class Conf:
    """Configuration read from the newsbiff rc file.

    The [global] section carries the NNTP connection settings; every
    other section describes one search and becomes a Search object
    collected in self.searches.
    """

    def __init__(self, file):
        self.config = ConfigParser.ConfigParser()
        # The original wrapped this in `except ConfigError: raise` which
        # only re-raised the same exception; the wrapper is dropped.
        self._parseConfig(file)

    def _parseConfig(self, file):
        """Read the rc file and dispatch each section to its parser."""
        self.searches = []
        self.config.readfp(open(file))
        for section in self.config.sections():
            if section == 'global':
                self._parseGlobalSection(section)
            else:
                self._parseSearchSection(section)

    def _parseGlobalSection(self, section):
        """Parse [global]: server and newsrc are required, rest optional."""
        if self.config.has_option(section, 'server'):
            self.server = self.config.get(section, 'server')
        else:
            raise ConfigError("Server not specified")

        # BUG FIX: this condition used to re-test for 'server', so a
        # config without a 'newsrc' option crashed with a ConfigParser
        # error instead of reporting "Newsrc file not specified".
        if self.config.has_option(section, 'newsrc'):
            self.newsrc = self.config.get(section, 'newsrc')
        else:
            raise ConfigError("Newsrc file not specified")

        # NOTE(review): feed_base_url is optional here, but SearchEngine
        # reads self.conf.feed_base_url unconditionally whenever a search
        # has an atom_file -- confirm atom-enabled configs always set it.
        if self.config.has_option(section, 'feed_base_url'):
            self.feed_base_url = self.config.get(section, 'feed_base_url')

        self._setIntOption(section, 'port', 119)
        self._setIntOption(section, 'context', 30)
        self._setIntOption(section, 'feed_history', 40)

    def _setIntOption(self, section, name, default):
        """Set self.<name> from the config as an int, or to `default`."""
        if self.config.has_option(section, name):
            setattr(self, name, int(self.config.get(section, name)))
        else:
            setattr(self, name, default)

    def _parseSearchSection(self, section):
        """Turn one non-global section into a Search in self.searches."""
        if not self.config.has_option(section, 'groups'):
            raise ConfigError(
                "No groups to search for section %s." % section)
        # The first matching option decides the search type; "all" means
        # no filter option was given and every article matches.
        search_type = "all"
        for possible_type in ('reply', 'subject', 'author', 'thread_size'):
            if self.config.has_option(section, possible_type):
                search_type = possible_type
                break
        groups = [group.strip()
                  for group in self.config.get(section, 'groups').split(',')]
        search_string = ''
        if search_type != "all":
            search_string = self.config.get(section, search_type)
        search = Search(groups, search_type, search_string)
        search.title = str(section)
        if self.config.has_option(section, 'atom_file'):
            search.atom_file = self.config.get(section, 'atom_file')
        else:
            search.atom_file = None
        self.searches.append(search)
    
class Search:
    """Value object describing one configured search: the newsgroups to
    scan, the match type (all/reply/subject/author/thread_size) and the
    string to match against."""

    def __init__(self, groups, type, search_string):
        self.search_string = search_string
        self.type = type
        self.groups = groups

class NewsgroupArticle:
    """One usenet article built from an NNTP XOVER response row.

    Header fields are captured eagerly; the body is fetched lazily from
    the server the first time getBody() is called.
    """

    def __init__(self, nntp, group, fields):
        # `fields` is one XOVER tuple:
        # (number, subject, from, date, message-id, references,
        #  byte count, line count).  Parameter renamed from `tuple`,
        # which shadowed the builtin.
        self.nntp = nntp
        self.newsgroup = group
        self.number  = int(fields[0])
        self.Subject = fields[1]
        self.From    = fields[2]
        self.Date    = fields[3]
        self.MessageID = fields[4]
        self.References = fields[5]
        self.Lines = fields[7]
        self.body = None  # list of body lines once fetched

    def __str__(self):
        """Render headers then the body, in a plain RFC-822-ish layout."""
        result = ''
        result += "Newsgroup: " + self.newsgroup + "\n"
        result += "Number: " + str(self.number) + "\n"
        result += "Subject: " + self.Subject + "\n"
        result += "From: " + self.From + "\n"
        result += "Date: " + self.Date + "\n"
        result += "Message-ID: " + self.MessageID + "\n\n"
        result += "\n"
        result += self.getBody()
        return result

    def getBody(self):
        """Return the body as one newline-terminated string, fetching it
        from the server on first call and caching it."""
        # BUG FIX: the old test `if not self.body` re-fetched an article
        # whose body is legitimately empty ([]); only None means
        # "not fetched yet".
        if self.body is None:
            if DEBUG:
                sys.stdout.write("Retrieving from %s body of message %s.\n" %
                        (self.newsgroup, self.MessageID))
            self.body = self.nntp.body(self.MessageID)[3]
        return "\n".join(self.body) + "\n"

class SearchEngine:
    """Connects to the NNTP server, runs every configured search, and
    records matched articles in the newsrc file so they are reported
    only once."""

    def __init__(self, config_filename):
        self.conf = Conf(config_filename)
        self.nntp = NNTP(self.conf.server, self.conf.port)
        self.results = []
        self.newsrc = Newsrc(self.conf.newsrc)
        # BUG FIX: __init__ used to call self.run() here, while the
        # __main__ block also calls engine.run() -- so every search ran
        # twice per invocation (double fetch, double feed render).  The
        # caller now owns the single run() call.

    def run(self):
        """Execute every configured search and persist seen-article marks."""
        for search in self.conf.searches:
            # Dispatch on the configured type, e.g. 'thread_size' ->
            # self.searchThreadSize.
            searchMethod = getattr(self, "search" + camelize(search.type))
            search_results = []
            for group in search.groups:
                if not self.newsrc.hasGroup(group):
                    self.newsrc.addGroup(group)
                    if DEBUG:
                        sys.stdout.write("Group %s added to newsrc file.\n"
                                % group)
                first, last = self.nntp.group(group)[2:4]
                articles_tuples = self.nntp.xover(first, last)[1]
                articles_objects = [NewsgroupArticle(self.nntp, group, article)
                        for article in articles_tuples]
                search_results += searchMethod(articles_objects,
                        search.search_string)
            if search.atom_file:
                renderer = AtomRenderer(search.atom_file, search.title,
                        self.conf.feed_base_url + search.atom_file,
                        self.conf.feed_history)
                renderer.render(search_results)

        # Mark every matched article as read so the next run skips it.
        for article in self.results:
            if not self.newsrc.isMarked(article.newsgroup, article.number):
                self.newsrc.mark(article.newsgroup, article.number)
        self.newsrc.save()

    def searchAll(self, articles, string = ''):
        """Match every article.

        NOTE(review): unlike the other search methods this returns ALL
        articles (seen or not) while recording only unseen ones in
        self.results -- confirm the feed is meant to repeat old articles.
        """
        for article in articles:
            self._appendNewArticleToResult(article, [])
        return articles

    def searchThreadSize(self, articles, size):
        """Match threads (root plus direct replies) of at least `size` posts."""
        size = int(size)
        threads = {}
        # Root articles are those with an empty References header.
        for article in articles:
            if len(article.References) == 0:
                threads[article] = []

        # Attach each article to every root it references.
        for article in articles:
            for root_article, children in threads.items():
                if root_article.MessageID in article.References:
                    children.append(article)

        # Keep only threads that reached the requested size.
        search_results = []
        for root_article, children in threads.items():
            if len(children) + 1 >= size:
                self._appendNewArticleToResult(root_article, search_results)
                for article in children:
                    self._appendNewArticleToResult(article, search_results)
        return search_results

    def searchReply(self, articles, string):
        """Match replies to any article whose From header contains `string`."""
        original_messages_ids = self._getMessageIdsByAuthor(articles, string)
        search_results = []
        for article in articles:
            for reference in article.References:
                if reference in original_messages_ids:
                    self._appendNewArticleToResult(article, search_results)
        return search_results

    def searchSubject(self, articles, string):
        """Match articles whose Subject header contains `string`."""
        return self._searchHeaderSubstring("Subject", articles, string)

    def searchAuthor(self, articles, string):
        """Match articles whose From header contains `string`."""
        return self._searchHeaderSubstring("From", articles, string)

    def _searchHeaderSubstring(self, header, articles, string):
        # Substring match on an arbitrary header attribute name.
        search_results = []
        for article in articles:
            if getattr(article, header).find(string) > -1:
                self._appendNewArticleToResult(article, search_results)
        return search_results

    def _appendNewArticleToResult(self, article, results):
        # Only articles not yet marked in the newsrc count as new; new
        # articles are recorded both locally and in the global results.
        if not self.newsrc.isMarked(article.newsgroup, article.number):
            results.append(article)
            self.results.append(article)

    def _getMessageIdsByAuthor(self, articles, name):
        """Return Message-IDs of articles whose From header contains `name`."""
        message_ids = []
        for article in articles:
            if article.From.find(name) > -1:
                message_ids.append(article.MessageID)
        return message_ids

class TextRenderer:
    """Renders search results as plain text, to stdout or to a file."""

    def __init__(self, results, output_filename = ""):
        self.results = results
        # Empty filename means "write to stdout".
        self.output_filename = output_filename

    def render(self):
        """Write each result followed by an 80-char rule; if there are no
        results, say so."""
        # The separator was previously built one '=' at a time in a loop.
        sep = "=" * 80
        output_string = sep + "\n"
        if len(self.results) < 1:
            output_string += "Nothing to tell you."
        for message in self.results:
            output_string += str(message)
            output_string += sep + "\n"
        if self.output_filename == "":
            sys.stdout.write(output_string)
        else:
            # BUG FIX: the output file was opened but never closed.
            out = open(self.output_filename, 'w')
            try:
                out.write(output_string)
            finally:
                out.close()

class RssRenderer:
    """Renders search results as an RSS 2.0 document written to a file.

    NOTE(review): XMLGenerator.characters() escapes XML-special
    characters, so the "<![CDATA[ <pre>" wrapper in renderMessage is
    emitted as escaped literal text rather than a real CDATA section
    (its closing marker "]]" also lacks the trailing '>').  Confirm this
    output is what feed readers are expected to consume.
    """

    def __init__(self, results, output_filename):
        # The output file is opened eagerly and is not explicitly closed
        # anywhere in this class.
        self.fd = open(output_filename, "w")
        self.results = results

    def render(self):
        """Emit the whole document: <rss><channel> metadata followed by
        one <item> per result message."""
        hd = XMLGenerator(self.fd, 'UTF-8')
        hd.startDocument()
        attr = AttributesImpl({ "version": "2.0", })
        hd.startElement("rss", attr)
        hd.startElement("channel", AttributesImpl({ }))
        hd.startElement("title", AttributesImpl({ }))
        hd.characters("NewsBiff RSS Feed")
        hd.endElement("title")
        hd.startElement("description", AttributesImpl({ }))
        hd.characters("Feed generated from usenet messages")
        hd.endElement("description")
        hd.startElement("link", AttributesImpl({ }))
        hd.characters("http://groups.google.com/")
        hd.endElement("link")
        for message in self.results:
            self.renderMessage(hd, message)
        hd.endElement("channel")
        hd.endElement("rss")
        hd.endDocument()

    def renderMessage(self, hd, message):
        """Emit one <item> element for a single article.

        Header text is decoded from Latin-1 via the Python-2-only
        `unicode` builtin.  Fetching the description triggers a lazy
        body download through message.getBody().
        """
        hd.startElement("item", AttributesImpl({ }))
        hd.startElement("title", AttributesImpl({ }))
        hd.characters(unicode(message.Subject, 'Latin-1'))
        hd.endElement("title")
        hd.startElement("link", AttributesImpl({ }))
        # Links point at the newsgroup, not the individual message.
        hd.characters("news://"+message.newsgroup)
        hd.endElement("link")
        hd.startElement("description", AttributesImpl({ }))
        hd.characters("<![CDATA[ <pre>" + unicode(message.getBody() +
            "</pre> ]]", 
            'Latin-1'))
        hd.endElement("description")
        hd.startElement("pubDate", AttributesImpl({ }))
        hd.characters(message.Date)
        hd.endElement("pubDate")
        hd.endElement("item")

class AtomRenderer:
    """Renders search results as an Atom 1.0 feed file.

    The feed document is created on first use; later runs append new
    entries and trim the oldest so the file never exceeds max_entries.
    """

    ATOM_NS = 'http://www.w3.org/2005/Atom'
    ET._namespace_map[ATOM_NS] = 'atom'
    # Strips the optional abbreviated weekday ("Mon, ") from RFC 2822
    # dates.  Hoisted to a class constant (it used to be re-compiled for
    # every rendered message) and made a raw string.
    ABBR_DAY_RE = re.compile(r"^\w{3}, ")

    def __init__(self, output_filename, title, link, max_entries):
        self.output_filename = output_filename
        self.title = title
        self.link = link
        self.max_entries = max_entries

    def render(self, results):
        """Write `results` to the feed, creating or updating the file."""
        self.results = results
        if not os.path.exists(self.output_filename):
            self._createFile()
        else:
            self._updateFile()

    def _createFile(self):
        # Build a fresh feed with the mandatory Atom metadata elements.
        feed = ET.Element("feed")
        feed.set('xmlns', self.ATOM_NS)
        title_elem = ET.SubElement(feed, 'title')
        title_elem.text = self.title
        updated = ET.SubElement(feed, 'updated')
        # RFC 3339 requires a colon inside the numeric UTC offset
        # ("+01:00"); strftime's %z does not produce one, so insert it.
        rfc3339date = time.strftime("%Y-%m-%dT%H:%M:%S%z")
        rfc3339date = rfc3339date[:-2] + ':' + rfc3339date[-2:]
        updated.text = rfc3339date
        id_elem = ET.SubElement(feed, 'id')
        id_elem.text = self.link
        link_elem = ET.SubElement(feed, 'link')
        link_elem.set('rel', 'self')
        link_elem.set('href', self.link)
        entry_count = 0
        for article in self.results:
            self._renderMessage(feed, article)
            entry_count += 1
            if entry_count >= self.max_entries:
                break
        tree = ET.ElementTree(feed)
        tree.write(self.output_filename, 'UTF-8')

    def _updateFile(self):
        # Append new entries to the existing document, dropping the
        # oldest entry whenever the cap is exceeded.
        tree = ET.parse(self.output_filename)
        feed = tree.getroot()
        entry_count = 0
        for article in self.results:
            self._renderMessage(feed, article)
            entries = tree.findall("{%s}entry" % self.ATOM_NS)
            if len(entries) > self.max_entries:
                feed.remove(entries[0])
            entry_count += 1
            if entry_count >= self.max_entries:
                break
        tree.write(self.output_filename, 'UTF-8')

    def _renderMessage(self, feed, article):
        """Append one <entry> for `article`; if its Date header cannot be
        parsed, report on stderr and skip the entry."""
        entry = ET.Element("{%s}entry" % self.ATOM_NS)
        # Locals renamed from `id`/`title`, which shadowed builtins.
        id_elem = ET.SubElement(entry, '{%s}id' % self.ATOM_NS)
        id_elem.text = 'message-id:' + \
                article.MessageID.replace('<', '').replace('>', '')
        title_elem = ET.SubElement(entry, '{%s}title' % self.ATOM_NS)
        title_elem.text = article.Subject
        author = ET.SubElement(entry, '{%s}author' % self.ATOM_NS)
        name = ET.SubElement(author, '{%s}name' % self.ATOM_NS)
        name.text = article.From
        updated = ET.SubElement(entry, '{%s}updated' % self.ATOM_NS)
        try:
            article.Date = self.ABBR_DAY_RE.sub('', article.Date)
            # "01 Jan 2007 12:34:56 +0100" -> ISO timestamp plus a
            # colonized numeric offset, as RFC 3339 requires.
            updated.text = datetime.datetime.strptime(article.Date[0:-6],
                "%d %b %Y %H:%M:%S").isoformat() + article.Date[-5:-2] + \
                        ':' + article.Date[-2:]
            content = ET.SubElement(entry, '{%s}content' % self.ATOM_NS)
            content.set('type', 'text')
            content.text = article.getBody()
            feed.append(entry)
        except ValueError as e:
            # BUG FIX: e.message is Python-2-only and deprecated;
            # str(e) is portable.
            perror("Error rendering " + article.MessageID + ": " + str(e))


def parseOptions():
    """Parse the command line and return the options object; abort with a
    usage error when the configuration file does not exist."""
    parser = OptionParser()
    parser.add_option("-c", "--conf", dest="config_filename",
                      default=expanduser("~/.newsbiffrc"), metavar="FILE",
                      help="Use configuration file FILE")
    parser.add_option("-t", "--text-output", dest="text_filename",
                      metavar="FILE",
                      help="Write results as plain text to FILE")
    parser.add_option("-r", "--rss-output", dest="rss_filename",
                      metavar="FILE",
                      help="Write results as RSS 2.0 to FILE ")

    options = parser.parse_args()[0]

    if not exists(options.config_filename):
        parser.error("Configuration file %s does not exist." %
                options.config_filename)

    return options

if __name__ == '__main__':
    # Script entry point: run every configured search, then emit the
    # results in whichever optional output formats were requested.
    options = parseOptions()
    engine = SearchEngine(options.config_filename)
    engine.run()
    if options.rss_filename is not None:
        RssRenderer(engine.results, options.rss_filename).render()
    if options.text_filename is not None:
        TextRenderer(engine.results, options.text_filename).render()
#    if (options.rss_filename is None) and (options.text_filename is None):
#        TextRenderer(engine.results).render()

#EOF
