# -*- encoding: utf-8 -*-
# jupiter 2013 parser
############################################################################
#    Copyright (C) 2007 by Håvard Gulldahl                                 #
#    havard@gulldahl.no                                                    #
#                                                                          #
#    This program is free software; you can redistribute it and/or modify  #
#    it under the terms of the GNU General Public License as published by  #
#    the Free Software Foundation; either version 2 of the License, or     #
#    (at your option) any later version.                                   #
#                                                                          #
#    This program is distributed in the hope that it will be useful,       #
#    but WITHOUT ANY WARRANTY; without even the implied warranty of        #
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         #
#    GNU General Public License for more details.                          #
#                                                                          #
#    You should have received a copy of the GNU General Public License     #
#    along with this program; if not, write to the                         #
#    Free Software Foundation, Inc.,                                       #
#    59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             #
############################################################################

import logging, re, urllib
from datetime import datetime
from time import strptime
from cgi import parse_qs, parse_qsl
import urlparse
import BeautifulSoup
import jeje, jeje.parser

class parser(jeje.parser.ejeje_parser):
    # Screen-scraping parser for the "Jupiter 2013" public journal
    # (postjournal) HTML layout. Python 2 / BeautifulSoup 3 code.
    # Relies on helpers inherited from ejeje_parser (sett_info,
    # legg_til_post, trekk_sammen_streng, absoluttifiser, suppeslurper)
    # whose definitions are outside this file.
    def parse_enkeltjournal(self, suppe, uri):
        """Parse a single journal page (plus its pagination) into posts.

        suppe -- BeautifulSoup 3 soup of the journal HTML page.
        uri   -- URL the soup was fetched from; used to absolutize
                 relative document links.

        Creates one post per table row via self.legg_til_post() and
        follows 'next_page' links until none remain.  Returns
        self.journalposter (accumulated by the base class).
        """
        logging.info('starter jupiter 2013-parser')
        logging.debug(uri)
        # The journal date sits in a centered bold <div>; the first 26
        # characters are a fixed text prefix that is sliced off.
        # NOTE(review): both the style-attribute match and the 26-char
        # offset are layout-dependent -- verify if the site changes.
        dato = suppe.find('div', {'style':'text-align:center; font-size:16px; font-weight:bold'}).string[26:]
        logging.debug('fant dato %s', dato)
        self.sett_info(dato)
        # One iteration per result page; the loop exits via 'break' when
        # no 'next_page' link is found (see the TypeError handler below).
        while True:
            logging.debug('finner tittelposter i suppa')
            _journaltabell = suppe.find('table', {'style':'width: 100%'})
            # Skip the first child (the header row of <th> cells shown in
            # the sample below); keep only the data rows.
            _tittelposter = _journaltabell.findChildren(recursive=False)[1:]
            logging.debug('fant %d titler', len(_tittelposter))
            for i, tp in enumerate(_tittelposter):

                # Expected header columns for each row:
                #<th style="width:80px">Saksnr</th>
                #<th><a href="/?date=02.10.2013&amp;direction=asc&amp;sort=dokdato">Datert</a></th>
                #<th><a href="/?date=02.10.2013&amp;direction=asc&amp;sort=ndoktype">Type</a></th>
                #<th><a href="/?date=02.10.2013&amp;direction=asc&amp;sort=offinnhold">Beskrivelse</a></th>
                #<th><a href="/?date=02.10.2013&amp;direction=asc&amp;sort=avsmot">Avsender/Mottaker</a></th>
                #<th><a href="/?date=02.10.2013&amp;direction=asc&amp;sort=saksbehandler">Saksbehandler</a></th>
                #<th>Dokument</th>
                #<th>Vedlegg</th>

                # Unpack the eight <td> cells of this row, in column order.
                # Raises ValueError if a row has a different cell count.
                _arkivsaksnr, _dokumentdato, _type, _tittel, _adressat, _saksbehandler, _dok, _vedlegg = [b.contents for b in tp.findAll('td')]
                #tittel = ''.join([ s for s in _tittel if isinstance(s, BeautifulSoup.NavigableString)])
                tittel = self.trekk_sammen_streng(_tittel)
                logging.debug('arbeider med post #%d: %s', i+1, tittel)

                post = self.legg_til_post()
                post.sett_tittel(tittel)
                # The case-number cell reads "<arkivsaksnr>-<dokumentnr>";
                # ValueError here if the dash is missing or doubled.
                arkivsaksnr, dokumentnr = self.trekk_sammen_streng(_arkivsaksnr).split('-')
                logging.debug('arkivsaksnr: %s, dokumentnr: %s', arkivsaksnr, dokumentnr)
                post.sett_arkivsaksnr(arkivsaksnr)
                post.sett_dokumentnr(dokumentnr)
                post.sett_adressat(self.trekk_sammen_streng(_adressat))
                #post.sett_refnr(self.trekk_sammen_streng(_refnr))
                #adressat = ''.join([ s for s in _adressat if isinstance(s, BeautifulSoup.NavigableString)])
                #if adressat: 
                    #post.sett_adressat(adressat)
                # sett_saksbehandler returns a person object (per the usage
                # below); tie it to the journal's owning organisation.
                person = post.sett_saksbehandler(self.trekk_sammen_streng(_saksbehandler))
                person.organisasjon_id = self.enkeltjournal.postjournal.organisasjon_id
                logging.debug('dokumentdato: %s', self.trekk_sammen_streng(_dokumentdato))
                # Document date is in Norwegian day.month.year format.
                post.sett_dokumentdato(datetime(*strptime(self.trekk_sammen_streng(_dokumentdato), '%d.%m.%Y')[0:6]))
                logging.debug('retung/type: %s' % self.trekk_sammen_streng(_type[1]))
                # _type[1] is the second node of the Type cell's contents.
                post.sett_retning(self.trekk_sammen_streng(_type[1]))
                __dok = _dok[1] # get actual <a> element
                # A class="uoff" marker means the document is exempt from
                # public disclosure ("unntatt offentlighet"); the element
                # text then carries the legal reference instead of a link.
                if __dok.has_key('class') and __dok['class'] == 'uoff': # unntatt offfentlighet
                    post.sett_offentlig(False)
                    logging.debug('lovref: %s', self.trekk_sammen_streng(__dok))
                    post.sett_lovreferanse(self.trekk_sammen_streng(__dok))
                else:
                    # Public document: absolutize a relative href against
                    # the page URI before storing it.
                    _href = __dok['href']
                    if not _href.startswith('http'):
                        protokoll, server, path = urlparse.urlparse(uri)[:3]
                        _href = '%s://%s%s' % (protokoll, server, _href)
                    post.sett_dok_uri(_href)
                try:
                    # Attachment link: second Tag child of the Vedlegg cell;
                    # IndexError/AttributeError when there is none.
                    v = [_v for _v in _vedlegg if isinstance(_v, BeautifulSoup.Tag)][1]
                    vv = v.find('a')
                    logging.debug("Fant vedlegg: %s (%s)", vv.string, vv['href'])
                    # TODO: fetch the attachment and store it
                except Exception, (e):
                    # Deliberate best-effort: rows without attachments are
                    # common, so failures here are silently ignored.
                    pass
#                    logging.exception(e)
                post.kontroller_felter()
            # Fetch the next page's soup, if any.
            try: 
                # find() returns None when there is no 'next_page' link;
                # subscripting None raises TypeError, which ends the loop.
                neste_uri = self.absoluttifiser(suppe.find('a', 'next_page')['href'], uri)
            except TypeError: # no next page
                break
            except Exception, (e):
                logging.exception(e)
                break

            logging.debug("slurper inn neste uri: %s", neste_uri)
            suppe = self.suppeslurper(neste_uri)

        return self.journalposter
