# -*- encoding: utf-8 -*-
# parser module
############################################################################
#    Copyright (C) 2007 by Håvard Gulldahl                                 #
#    havard@gulldahl.no                                                    #
#                                                                          #
#    This program is free software; you can redistribute it and/or modify  #
#    it under the terms of the GNU General Public License as published by  #
#    the Free Software Foundation; either version 2 of the License, or     #
#    (at your option) any later version.                                   #
#                                                                          #
#    This program is distributed in the hope that it will be useful,       #
#    but WITHOUT ANY WARRANTY; without even the implied warranty of        #
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         #
#    GNU General Public License for more details.                          #
#                                                                          #
#    You should have received a copy of the GNU General Public License     #
#    along with this program; if not, write to the                         #
#    Free Software Foundation, Inc.,                                       #
#    59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             #
############################################################################

import logging, string, time, datetime, re, urlparse, os.path
import BeautifulSoup
import jeje
import jeje.collector

class ParseError(Exception):
    """Raised when a journal page cannot be parsed as expected."""

class ejeje_parser:
    """Base class for postal-journal ("postjournal") parsers.

    Builds a jeje.ejeje_enkeltjournal from a calendar/overview page and
    offers generic helpers for working-day arithmetic, URL handling and
    markup cleanup.  Site-specific subclasses override
    parse_enkeltjournal() to extract entries from their site's markup.
    """

    # Class-level defaults kept for backward compatibility with code that
    # reads them off the class; real per-instance state is set in __init__.
    enkeltjournal = None
    journalposter = []

    def __init__(self):
        # Matches sgml/xml entities such as &amp; &#65; &#x41;
        self.re_entity = re.compile(r'&#?x?[A-Za-z0-9]+;')
        # BUGFIX: journalposter used to exist only as a class attribute, so
        # every parser instance appended into one shared list.  Give each
        # instance its own state.
        self.journalposter = []
        self.enkeltjournal = None

    def sett_info(self, info):
        """Receive metadata about the journal; currently only logged."""
        logging.debug("Fikk info: %s", info)

    def suppeslurper(self, uri):
        """Fetch `uri` and return it as a BeautifulSoup ("suppe")."""
        return jeje.collector.suppeslurper(uri)

    def parse_kalender(self, postjournal, suppe=None, dato=None):
        """Return a jeje.ejeje_enkeltjournal() built from a soup holding a
        calendar/overview of single journals."""
        uri, dato = self.finn_enkeltjournal_uri(postjournal, suppe, dato)
        self.enkeltjournal = jeje.ejeje_enkeltjournal()
        self.enkeltjournal.postjournal = postjournal
        self.enkeltjournal.uri = uri
        self.enkeltjournal.dato = dato
        return self.enkeltjournal

    def parse_enkeltjournal(self, suppe, uri):  # re-implemented downstream
        """Parse the single journal; return a list of jeje.ejeje_journalpost()."""
        return self.journalposter

    def finn_enkeltjournal_uri(self, postjournal, suppe, dato=None):
        """Return (uri, date) for the single journal `etterslep` working
        days in the past.

        postjournal.enkeltjournal_uri is treated as a strftime() template
        that is filled in with the computed date.
        """
        gmldato = self.delta_arbeidsdager(postjournal.etterslep, dato)
        return unicode(time.strftime(postjournal.enkeltjournal_uri, gmldato.timetuple())), gmldato

    def legg_til_post(self):
        """Create a new entry on the current enkeltjournal, record it in
        self.journalposter and return it."""
        post = self.enkeltjournal.ny_post()
        self.journalposter.append(post)
        return post

    def trekk_sammen_streng(self, liste):
        """Join the text elements of a node list interleaved with html elements."""
        # u' '.join is the py3-safe equivalent of string.join's default
        return unicode(u' '.join([s.strip() for s in liste if isinstance(s, BeautifulSoup.NavigableString)]))

    def trekk_ut_streng(self, node):
        """Descend to the deepest first child of `node` and return its
        stripped text content."""
        while node.findChildren():
            node = node.findChildren()[0]
        return unicode(node.string).strip()

    def remove_markup(self, s):
        """Remove sgml/xml markup. &nbsp; => ' ', &gt; => '>', &lt; => '<', &amp; => '&', etc"""
        return re.sub(self.re_entity, entity_map, s)

    def parse_url_query(self, s):
        """Parse the query part of a URL into a {key: value} dict.

        BUGFIX: each pair is split on the first '=' only, so values that
        themselves contain '=' no longer raise ValueError; a bare key
        without '=' maps to an empty string instead of crashing.
        """
        d = {}
        for pair in s[s.find('?')+1:].split('&'):
            if not pair:
                continue
            key, _sep, value = pair.partition('=')
            d[key] = value
        return d

    def absoluttifiser(self, lenkeuri, henvisende_uri):
        """Take a uri that may be relative and return an absolute one,
        resolved against the referring uri."""
        if lenkeuri.startswith(('http://', 'https://')):
            return lenkeuri  # already absolute
        return urlparse.urljoin(henvisende_uri, lenkeuri)

    def finn_siste_arbeidsdag(self, dato=None):
        """Return the last working day (Mon-Fri), up to and including
        `dato` (default: today)."""
        if dato is None:
            dato = datetime.date.today()
        if dato.isoweekday() in (1, 2, 3, 4, 5):
            return dato
        # Saturday (6) -> back 1 day, Sunday (7) -> back 2 days
        return dato - datetime.timedelta(days=dato.isoweekday() - 5)

    def delta_arbeidsdager(self, dager, fradato=None):
        """Return the date `dager` working days in the past (relative to
        `fradato`, if given).

        BUGFIX: a bounded loop replaces `while i != dager`, which spun
        forever if `dager` was negative; negative input now yields the
        last working day itself.
        """
        d = self.finn_siste_arbeidsdag(fradato)
        for _unused in range(max(dager, 0)):
            # step back one calendar day, then skip over any weekend
            d = self.finn_siste_arbeidsdag(d - datetime.timedelta(days=1))
        return d

    def sjekk_felter(self):
        """Hook for subclasses to validate parsed fields; no-op here."""
        pass

def entity_map(entity):
    """Map a regex match of one sgml/xml entity to its character.

    Handles hexadecimal (&#x41;) and decimal (&#65;) character references
    plus the common named entities.

    BUGFIX: an unrecognised named entity used to raise KeyError and abort
    the whole substitution; it is now returned untouched.  Also adds the
    common &quot; / &apos; mappings.
    """
    ent = entity.group(0).lower()
    # unichr only exists on Python 2; fall back to chr on Python 3
    try:
        _char = unichr
    except NameError:
        _char = chr
    if ent.startswith('&#x'):  # numerical entity, hexadecimal
        return _char(int(ent[3:-1], 16))
    if ent.startswith('&#'):  # numerical entity, decimal
        return _char(int(ent[2:-1], 10))
    # must be a named character entity
    _map = {'&nbsp;': ' ', '&gt;': '>', '&lt;': '<', '&amp;': '&',
            '&quot;': '"', '&apos;': "'"}
    return _map.get(ent, entity.group(0))
