# -*- encoding: utf-8 -*-
############################################################################
#    Copyright (C) 2007 by Håvard Gulldahl                                 #
#    havard@gulldahl.no                                                    #
#                                                                          #
#    This program is free software; you can redistribute it and#or modify  #
#    it under the terms of the GNU General Public License as published by  #
#    the Free Software Foundation; either version 2 of the License, or     #
#    (at your option) any later version.                                   #
#                                                                          #
#    This program is distributed in the hope that it will be useful,       #
#    but WITHOUT ANY WARRANTY; without even the implied warranty of        #
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         #
#    GNU General Public License for more details.                          #
#                                                                          #
#    You should have received a copy of the GNU General Public License     #
#    along with this program; if not, write to the                         #
#    Free Software Foundation, Inc.,                                       #
#    59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             #
############################################################################

import logging, BeautifulSoup, datetime, sys, types, traceback
from lib import openanything # Mark Pilgrims take on a nice downloader. http://www.diveintopython.org/http_web_services/index.html
import jeje
#import jeje.setup

class SlurpError(Exception):
    """Raised when a download returns an HTTP error status or an empty body."""
    pass

class collector(object):

    supper = []
    def __init__(self, dbconn, dato=None):
        self.dbconn = dbconn
        self.dato = dato
        pj = jeje.ejeje_postjournal
        #print dbconn
        #self.postjournaler = dbconn.find(pj, pj.fungerer==True, pj.fjernet==False, pj.stanset==False)
        self.postjournaler = dbconn.find(pj, pj.id==30)#pj.fungerer==True, pj.fjernet==False, pj.stanset==False)
        logging.debug('fant %i postjournaler som skal scannes', self.postjournaler.count())
        
    def collect(self):
        for journal in self.postjournaler:
            try:
                self._collect(journal)
            except Exception, e:
                logging.exception(e)
                logging.error('Feil ved innsamling av journal: %s', journal.organisasjon.navn)
                f = jeje.ejeje_feil('innsamling')
                f.uri = journal.uri
                _type, _value, _tb = sys.exc_info()
                f.suppe = unicode(traceback.format_exc(_tb))
                #f.suppe = unicode('%s: %s (%s)' % (_type, _value, ''))#", _traceback.tb_lineno))
                logging.debug(f.suppe)
                f.postjurnal = journal
                self.dbconn.add(f)
            
    def _collect(self, journal):
        journal_suppe = None
        logging.debug('laster parseren')
        try:
            parser = jeje.fingerprinter.finn_parser(journal.format).parser()
        except AttributeError:
            logging.debug(u'mm, må fingerprinte journal: %s', journal.id)
            try:
                journal_suppe = suppeslurper(journal.uri)
                _parser = jeje.fingerprinter.identifiser_postjournal(journal_suppe)
            except SlurpError, e:
                _parser = None
            if _parser is None:
                #journal.fungerer = False
                f = jeje.ejeje_feil('identifisering')
                f.uri = journal.uri
                if journal_suppe:
                    f.suppe = unicode(journal_suppe)
                f.postjurnal = journal
                self.dbconn.add(f)
                #self.dbconn.commit() # flushing 
                logging.error('Kunne ikke identifisere')
                return False
            else:
                logging.debug('parser: %s', _parser)
                #parser = _parser.parser()
                parser = _parser()
                journal.format = unicode(parser.__module__.split('.')[-1])

        logging.info('Skal parse #%s: %s med parser %s', journal.id, journal.organisasjon.navn, parser.__module__.split('.')[-1])

        if not journal.implisitt_enkeltjournal:
            logging.debug('slurper inn kalender: %s', journal.uri)
            enkeltjournal = parser.parse_kalender(journal, suppe=suppeslurper(journal.uri), dato=self.dato)
        else:
            enkeltjournal = parser.parse_kalender(journal, dato=self.dato)

        logging.debug('skal hente enkeltjournal: %s', enkeltjournal.uri)
        #sjekk om uri-en allerede er hentet tidligere
        jl = jeje.ejeje_postjournal_logg
        logg = self.dbconn.find(jl, jl.postjournal==journal, jl.journaldato==enkeltjournal.dato, jl.ferdig==True)
        logging.debug('funnet i logg: %s', logg.count())
        if logg.count() > 0:
            logging.debug('enkeltjournalen er hentet tidligere: %s', journal.organisasjon.navn)
            return None
        
        try:
            journalposter = parser.parse_enkeltjournal(suppeslurper(enkeltjournal.uri), enkeltjournal.uri)
        except SlurpError, e:
            #journal.fungerer = False
            f = jeje.ejeje_feil('parse enkeltjournal')
            f.uri = journal.uri
            f.postjurnal = journal
            self.dbconn.add(f)
            #self.dbconn.commit() # flushing 
            logging.error('Kunne ikke parse enkeltjournal')
            return False
        logg = jeje.ejeje_postjournal_logg()
        logg.postjournal = journal
        logg.tidspunkt = datetime.datetime.now()
        logg.journaldato = enkeltjournal.dato
        logg.req_uri = enkeltjournal.uri
        logg.ferdig = len(journalposter) > 1 ## hvis ikke ferdig, skannes på nytt senere

def slurp(uri):
    """Download *uri* via openanything and return its result dict.

    The returned dict contains at least 'status' (HTTP status code) and
    'data' (response body) -- see openanything.fetch().

    Raises ValueError if uri is not a non-empty string.
    """
    logging.debug('henter uri: %s', uri)
    # Explicit validation instead of assert, which is stripped under
    # "python -O" and would let bad uris through silently.
    if not (isinstance(uri, types.StringTypes) and len(uri.strip()) > 0):
        raise ValueError('uri must be a non-empty string, got %r' % (uri,))
    req = openanything.fetch(uri)
    # BUGFIX: log message typo 'emd' -> 'med'
    logging.debug('uri-en ble hentet med status %s', req['status'])
    return req

def suppeslurper(uri):
    """Fetch *uri* and return its body parsed into a BeautifulSoup tree.

    Raises SlurpError when the server answers with an HTTP error status
    (400-598) or with an empty body.
    """
    respons = slurp(uri)
    status = respons['status']
    data = respons['data']
    feil_status = 399 < status < 599
    if feil_status or len(data) == 0:
        raise SlurpError('Got HTTP status %s and data length %s from %s' % \
            (status, len(data), uri))
    suppe = BeautifulSoup.BeautifulSoup(data,
                                        smartQuotesTo=None,
                                        convertEntities='html')
    return suppe

## 1. let gjennom databasen etter postjournaler som skal skannes (ikke disabled, fungerer)
## 2. for hver postjournal, hent data
## 3.   -> for data inn i fingerprinter (->parser)
