#!/usr/bin/env python

import time, datetime
import feedparser
from sgmllib import SGMLParser
import hashlib
import re
import sys
sys.path.append("../gen-py")
sys.path.append("/root/buildthrudb/thrudb/tutorial/gen-py")
#sys.path.append("../../gen-py")

from thrift import Thrift
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TFramedTransport, TMemoryBuffer
from thrift.protocol.TBinaryProtocol import TBinaryProtocol

from Thrudoc import Thrudoc, ttypes as ThrudocTypes
from Thrudex import Thrudex, ttypes as ThrudexTypes
#from Thruqueue import Thruqueue, ttypes as ThruqueueTypes

from Feed.ttypes import Feed, Entry

THRUDEX_PORT   = 11299;
THRUDOC_PORT   = 11291;
#THRUQUEUE_PORT = 9093;

THRUDOC_BUCKET = "feeds";
THRUDEX_FEED_INDEX  = "feeds";
#THRUQUEUE_QUEUE_NAME = "parse";


class FeedManager(object):
    def __init__(self):
        self.connect_to_thrudoc()
        self.connect_to_thrudex()
 #       self.connect_to_thruqueue()

    def connect_to_thrudoc(self):
        socket = TSocket('localhost', THRUDOC_PORT)
        transport = TFramedTransport(socket)
        protocol = TBinaryProtocol(transport)
        self.thrudoc = Thrudoc.Client(protocol)
        transport.open()
        self.thrudoc.admin("create_bucket", THRUDOC_BUCKET)

    def connect_to_thrudex(self):
        socket = TSocket('localhost', THRUDEX_PORT)
        transport = TFramedTransport(socket)
        protocol = TBinaryProtocol(transport)
        self.thrudex = Thrudex.Client(protocol)
        transport.open()
        self.thrudex.admin("create_index", THRUDEX_FEED_INDEX)

    def connect_to_thruqueue(self):
        socket = TSocket('localhost', THRUQUEUE_PORT)
        transport = TFramedTransport(socket)
        protocol = TBinaryProtocol(transport)
        self.thruqueue = Thruqueue.Client(protocol)
        transport.open()
        self.thruqueue.create(THRUQUEUE_QUEUE_NAME, 1)

    # entry members
    def add_entry(self, key, e):
        eid = self.store_entry(key, e)
        self.index_entry(eid, e)
        return eid
    
    def get_entry(self, eid):
        e_str = self.thrudoc.get(THRUDOC_BUCKET, eid)
        if len(e_str) == 0:
            return
        return self.edeserialize(e_str)

    def store_entry(self, key, e):
        e_str = self.eserialize(e)
        self.thrudoc.put(THRUDOC_BUCKET, key, e_str)
        return key

    def index_entry(self, eid, e):
        doc = ThrudexTypes.Document()
        doc.key = eid
        doc.index = THRUDEX_FEED_INDEX
        doc.fields = []

        field       = ThrudexTypes.Field()
        field.key   = "fid"
        field.value = e.fid
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "type"
        field.value = "entry"
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "tags"
        field.value = e.tags
        field.sortable = True
        doc.fields.append(field)
            
        field       = ThrudexTypes.Field()
        field.key   = "link"
        field.value = e.link
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "date"
        field.value = e.date
        field.sortable = True
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "id"
        field.value = e.id
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "updated"
        field.value = e.updated
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "title"
        field.value = e.title
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "content"
        field.value = e.content
        doc.fields.append(field)

        self.thrudex.put(doc)

    def eserialize(self, e):
        mbuf = TMemoryBuffer()
        mbuf_protocol = TBinaryProtocol(mbuf)
        e.write(mbuf_protocol)
        return mbuf.getvalue()
    
    def edeserialize(self, e_str):
        mbuf = TMemoryBuffer(e_str)
        mbuf_protocol = TBinaryProtocol(mbuf)
        e = Entry()
        e.read(mbuf_protocol)
        return e

    def create_doc_list(self, ids):
        docs = []
        for pointer, ele in enumerate(ids):
            doc = ThrudocTypes.Element()
            doc.bucket = THRUDOC_BUCKET
            doc.key    = ele.key
            docs.append(doc)
        return docs   

    def search_entries(self, terms, offset=0, limit=1000):
        q = ThrudexTypes.SearchQuery()
        q.index = THRUDEX_FEED_INDEX
        q.query = terms + " AND type:(entry)"
#        q.offset = offset
        q.limit = limit
#        q.sortby = "updated"
        q.desc = True

        print "sent ", q
        ids = self.thrudex.search(q)
        if ids is None:
            return
        entries = []
        if len(ids.elements) > 0:
            list_response = self.thrudoc.getList(self.create_doc_list(ids.elements))
            for ele in list_response:
                if ele.element.value != '':
                    e = {}
                    entry = self.edeserialize(ele.element.value)
 
                    e['entry'] = entry
                    e['feed'] = self.get_feed(entry.fid)
                    entries.append(e)
        return ids.total, entries
        
    # feed members
    def add_feed(self, key, f):
        fid = self.store_feed(key, f)
        self.index_feed(fid, f)
        return fid
    
    def get_feed(self, fid):
        f_str = self.thrudoc.get(THRUDOC_BUCKET, fid)
        if len(f_str) == 0:
            return
        return self.fdeserialize(f_str)

    def store_feed(self, key,  f):
        f_str = self.fserialize(f)
        self.thrudoc.put(THRUDOC_BUCKET, key, f_str)
        return f.link

    def index_feed(self, fid, f):
        doc = ThrudexTypes.Document()
        doc.key = fid
        doc.index = THRUDEX_FEED_INDEX
        doc.fields = []

        field       = ThrudexTypes.Field()
        field.key   = "type"
        field.value = "feed"
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "title"
        field.value = f.title
        field.sortable = True
        doc.fields.append(field)
            
        field       = ThrudexTypes.Field()
        field.key   = "link"
        field.value = f.link
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "subtitle"
        field.value = f.subtitle
        field.sortable = True
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "rights"
        field.value = f.rights
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "generator"
        field.value = f.generator
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "etag"
        field.value = f.etag
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "last_modified"
        field.value = f.last_modified
        doc.fields.append(field)

        field       = ThrudexTypes.Field()
        field.key   = "updated"
        field.value = f.updated
        doc.fields.append(field)

        self.thrudex.put(doc)

    def fserialize(self, f):
        mbuf = TMemoryBuffer()
        mbuf_protocol = TBinaryProtocol(mbuf)
        f.write(mbuf_protocol)
        return mbuf.getvalue()
    
    def fdeserialize(self, f_str):
        mbuf = TMemoryBuffer(f_str)
        mbuf_protocol = TBinaryProtocol(mbuf)
        f = Feed()
        f.read(mbuf_protocol)
        return f

    def search_feeds(self, terms, offset=0, limit=100):
        q = ThrudexTypes.SearchQuery()
        q.index = THRUDEX_FEED_INDEX
        q.query = terms + " AND type:(feed)"
        q.offset = offset
        q.limit = limit
#        q.sortby = "updated"
        q.desc = True
        ids = self.thrudex.search(q)

        if ids is None:
            return
        feeds = []
        if len(ids.elements) > 0:
            list_response = self.thrudoc.getList(self.create_doc_list(ids.elements))
            for ele in list_response:
                if ele.element.value != '':
                    f = {}
                    feed = self.fdeserialize(ele.element.value)
                    entries.append(f)
        return ids.total, feeds


    def process_feeds(self):
        
        urls = []
        # replace below with a queue or something
        for line in open("feeds.txt", "rb"):
            if line.startswith("#"):
                continue
            urls.append(line.strip())

        while True:
            now = time.time()

            for url in urls:
                feedkey = hashlib.sha224(url).hexdigest()
                f = Feed()               
                try:
                    f = self.get_feed(feedkey)
                except Exception, e:
                    print e

                # let feedparser check etags/lm
                d = {}
                try:
                    d = feedparser.parse(url, \
                                         etag = f.etag, modified = f.last_modified)
                except UnicodeDecodeError, e:
                    print e
                    continue
                
                status = d.get('status', 404)

                # not updated, bail out
                if status == 304:
                    continue
                fd = d.feed                

                # for those that don't support either etag/lm,
                # do a basic string compare on the updated. if they match,
                # we will skip as well.
                if fd.get('updated', "dummy") == f.updated:
                    continue

                # at this point we've made an effort to not pull unchanged
                # feeds. pull and index.
                print "%s is new or has changed, fetching: %d" % (url, status)
                if status in [200, 301, 302, 307]:

                    try:
                        f.title = fd.get('title', "").encode('utf-8')
                        f.subtitle = fd.get('subtitle', "").encode('utf-8')
                    except UnicodeDecodeError:
                        print "bleh"
                        
                    f.rights = fd.get('generator', None)
                    f.link = fd.get('link', None)
                    f.updated = fd.get('updated', None)
                    f.etag = d.get('etag', None);
                    f.last_modified = d.get('last_modified', None);

                    self.add_feed(feedkey, f)

                    for en in d.entries:
                        e = Entry()
 
                        e.link = None
                        entrykey = None
                        postdate = None
                        try:
                            e.link = en.links[0]['href']
                            entrykey = hashlib.sha224(e.link).hexdigest()
                            existing = self.get_entry(entrykey)
                            postdate = existing.date
                        except Exception, ex:
                            print ex

                        e.date = en.get('date', None)
                        e.updated = en.get('updated', None)
                        if postdate == e.date:
                            continue

                        print "dates don't match, updating post"
                        e.fid = feedkey                            
                        e.id = en.get('id', None)
                        e.title = en.get('title', "no title").encode('utf-8')
                        try:
                            tags = en.get('tags', [])
                            e.tags = \
                                   "".join(r["term"] for r in tags).encode('utf-8')
                        
                            if(en.summary_detail):
                                e.content = en.summary_detail['value'].encode('utf-8')
                            else:
                                e.content = en.content[0]['value'].encode('utf-8')
                        except (UnicodeDecodeError, AttributeError):
                            print "no content, skipping post"
                            continue
                        
                        parser = URLLister()
                        try:
                            parser.feed(e.content)
                            parser.close()
                            for url in parser.urls:
                                print "found link" + url
                        except Exception, e:
                            print e
                            
#                        print "indexing " + e.link
                        self.add_entry(entrykey, e)

                else:
                    print "%s failed with status %d" %  (url, status)

            time.sleep(5)

class URLLister(SGMLParser):
    """SGML parser that collects the href value of every <a> tag it sees."""

    def reset(self):
        # Begin (or begin again) with no collected links.
        SGMLParser.reset(self)
        self.urls = []

    def start_a(self, attrs):
        # attrs is a list of (name, value) pairs; keep each href value,
        # preserving document order and duplicates.
        for name, value in attrs:
            if name == 'href':
                self.urls.append(value)
                                            
if __name__ == "__main__":
    # Script entry point: imports the project-local daemonize helper
    # (daemonizing itself is currently disabled) and polls feeds forever.
    import daemonize as dm
#    dm.daemonize('/dev/null','/tmp/feeds.log','/tmp/feeds.log')
    fm = FeedManager()
    fm.process_feeds()
