# -*- coding: utf-8 -*-
from __future__ import with_statement
import httplib2
import urllib
from django.conf import settings
from datetime import *
from opengov.govtrack.models import CrawlItem
from datetime import timedelta
import re
from lxml import etree
from cStringIO import StringIO
from opengov.govtrack.util import *
from django.core.mail import send_mail
import sys

def queue_utredningar(year=None):
    """Scan the Riksdag list of government inquiries ("utredningar")
    for the given year and queue each unknown one as a CrawlItem for
    later fetching.

    year -- four-digit year to scan; defaults to the current year.

    Errors are reported by best-effort e-mail to the first ADMINS
    address instead of propagating to the caller.
    """
    try:
        if year is None:
            # Default to the current year
            year = date.today().year

        print(u"Soker utredningar fran %s" % year)

        url = u"http://rixlex.riksdagen.se/webbnav/index.aspx?nid=3262&rm=%s" % str(year)
        h = httplib2.Http()
        resp, content = h.request(url)
        content = content.decode("utf-8")

        if resp["status"] == "200":
            # Parse the list of inquiries
            dom = etree.HTML(content)
            items = dom.xpath("//ol/li")
            print(u"Antal: %s" % len(items))
            i = 0

            if items:
                for item in items:
                    i = i + 1
                    print(u"Post %s" % str(i))
                    text = item.xpath("a")[0].text
                    uref = extract_utredning_reference_from_text(text)
                    if not uref:
                        # No inquiry reference found in the title - fall
                        # back to the URL fragment after "&bet="
                        href = item.xpath("a")[0].attrib["href"]
                        bet = href.split("&bet=")[1]

                        if re.search(r"[A-Za-z]*\d\d\d\d:.*", bet):
                            # Insert a space before the year part, e.g.
                            # "SOU2008:12" -> "SOU 2008:12"
                            yearpos = re.search(r"\d\d\d\d", bet).start()
                            uref = bet[:yearpos] + " " + bet[yearpos:]
                        else:
                            print(u"----Ingen beteckning for %s" % text)
                            continue  # next item

                    print(u"Forsaker lasa %s" % uref)
                    try:
                        # Existence check only; the object itself is unused
                        utredning = Utredning.objects.get(nummer__iexact=uref)
                    except Utredning.DoesNotExist:
                        # Unknown inquiry - queue it unless already queued
                        try:
                            ci = CrawlItem.objects.get(nummer__iexact="%s" % uref, klass__iexact="Utredning")
                            print(u"Crawlitem for %s finns redan" % uref)
                        except CrawlItem.DoesNotExist:
                            ci = CrawlItem()
                            ci.nummer = "%s" % uref
                            ci.klass = "Utredning"
                            ci.save()
                            print(u"Sparade Crawlitem for %s" % uref)
            else:
                print(u"Inga utredningar fran %s" % str(year))
        else:
            print(u"Fel status vid anrop av rixlex: %s" % resp["status"])
    except Exception:
        # Was a bare except: (would also trap SystemExit/KeyboardInterrupt)
        # and only reported the exception type; include the value too.
        text = "Fel i queue_utredningar: %s: %s" % (sys.exc_info()[0], sys.exc_info()[1])
        print(text)
        send_mail(u"opengov: queue_utredningar", text, settings.ADMINS[0][1], [settings.ADMINS[0][1]], fail_silently=True)




def fetch_utredningar():
    """Drain the inquiry crawl queue: download every queued
    "Utredning" document and delete its queue entry on success."""
    print(u"Borjar lasa utredningar fran ko")
    queued = CrawlItem.objects.filter(klass="Utredning")
    print(u"%s poster i klattringskon" % len(queued))

    for entry in queued:
        fetched_ok = fetch_single_utredning(entry.nummer)
        if fetched_ok:
            print(u"Raderar klattringspost for %s" % entry)
            entry.delete()


def fetch_single_utredning(nummer):
    """Download the HTML page for inquiry *nummer* from riksdagen.se
    and store it in the index-queue directory.

    Returns True when the document was saved, False on a non-200
    HTTP response.
    """
    print(u"Hamtar %s" % nummer)

    h = httplib2.Http()
    doc_url = "http://www.riksdagen.se/webbnav/?nid=3255&doktyp=komm&dok_id=B2_" + url_escape_utf8(nummer.replace(" ","").strip())
    resp, content = h.request(doc_url)

    if resp["status"] == "200":
        # Queue the document for indexing; the filename encodes the number
        filepath = settings.GOVTRACK_INDEX_QUEUE + "/utredning-" + nummer.replace(":","-").replace(" ","_") + ".html"
        # open() replaces the deprecated file() builtin; binary mode
        # because `content` is the raw, undecoded response body.
        with open(filepath, 'wb') as docfile:
            docfile.write(content)
            print(u"Sparade %s till %s" % (nummer, filepath))
            return True
    else:
        print(u"HTTP %s for %s" % (resp["status"], nummer))

    return False



# Hamta radata for kommittédirektiv
def queue_komdir(start_date=None, end_date=None):

    if start_date == None and end_date == None:
        # Las senaste datum for skrapning
        start_date = get_date_from_file(settings.GOVTRACK_KOMDIR_LAST_CRAWL_FILE, date.today())
        twenty_days = timedelta(days=20)
        start_date = start_date - twenty_days
        end_date = date.today()

    print(u"Soker poster fran %s" % start_date)

    the_date = start_date
    one_day = timedelta(days=1)

    while the_date <= end_date:
        print(u"Soker poster fran %s" % the_date.isoformat())

        # Sok direktivnummer for aktuellt datum
        url = "http://62.95.69.15/cgi-bin/thw?%24{HTML}=dir_lst&%24{OOHTML}=dir_dok&%24{SNHTML}=dir_err&%24{MAXPAGE}=26&%24{TRIPSHOW}=format%3DTHW&%24{BASE}=DIR&%24{FREETEXT}=&RUB=&BET=&ORG=&UDAT=" + the_date.isoformat()
        h = httplib2.Http()
        resp, content = h.request(url)

        if resp["status"] == "200":
            if re.search(u"<p>Sökningen gav ingen träff!</p>", content):
                print(u"Inga direktiv %s" % the_date.isoformat())
            else:
                print(u"Direktiv finns %s" % the_date.isoformat())

                # Kolla om lista eller direkttraff
                if re.search(u"Sökresultat: UDAT", content):
                    # lista
                    parser = etree.HTMLParser()
                    result_dom = etree.parse(StringIO(content), parser)
                    for item in extract_komdir_numbers_from_searchresult_dom(result_dom):
                        create_komdir_crawlitem(item)
                else:
                    # direktträff
                    dirnr = extract_komdir_number_from_komdir_page(content)
                    create_komdir_crawlitem(dirnr)

        # move on
        the_date = the_date + one_day

    # Spara senast lasta datum
    store_date_in_file(settings.GOVTRACK_KOMDIR_LAST_CRAWL_FILE, the_date - one_day)
    return


def fetch_komdir():
    """Drain the committee-directive crawl queue: download every
    queued directive and delete its queue entry on success."""
    print(u"Borjar lasa kommittedirektivsko")
    queued = CrawlItem.objects.filter(klass="Kommittedirektiv")
    print(u"%s poster i klattringskon" % len(queued))

    for entry in queued:
        fetched_ok = fetch_single_komdir(entry.nummer)
        if fetched_ok:
            print(u"Raderar klattringspost for %s" % entry)
            entry.delete()


def fetch_single_komdir(dirnummer):
    """Download the page for committee directive *dirnummer* and store
    it in the index-queue directory.

    Returns True when the document was saved, False on a non-200
    HTTP response.
    """
    print(u"Hamtar %s" % dirnummer)

    dirnummer_escaped = urllib.quote_plus(dirnummer)

    h = httplib2.Http()
    doc_url = "http://62.95.69.15/cgi-bin/thw?%24{HTML}=dir_lst&%24{OOHTML}=dir_dok&%24{SNHTML}=dir_err&%24{MAXPAGE}=26&%24{TRIPSHOW}=format%3DTHW&%24{BASE}=DIR&%24{FREETEXT}=&RUB=&BET=" + dirnummer_escaped + "&ORG=&UDAT="

    resp, content = h.request(doc_url)

    if resp["status"] == "200":
        # Queue the document for indexing; the filename encodes the number
        filepath = settings.GOVTRACK_INDEX_QUEUE + "/komdir-" + dirnummer.replace(":","-") + ".html"
        # open() replaces the deprecated file() builtin; binary mode
        # because `content` is the raw, undecoded response body.
        with open(filepath, 'wb') as docfile:
            docfile.write(content)
            print(u"Sparade %s till %s" % (dirnummer, filepath))
            return True
    else:
        print(u"HTTP %s for %s" % (resp["status"], dirnummer))

    return False
