# -*- coding: utf-8 -*-
import re
import os
from datetime import datetime, date
from itertools import islice
import requests
import requests.exceptions

import six
from rdflib import Graph,Literal

from ferenda import DocumentRepository
from ferenda.errors import DocumentRemovedError, ParseError
from ferenda.decorators import recordlastdownload, managedparsing

from ferenda import TextReader, Describer, FSMParser, CitationParser, URIFormatter

from ferenda.elements import Body, Heading, Preformatted, Paragraph, UnorderedList, ListItem, Section, Subsection, Subsubsection, UnicodeElement, CompoundElement, serialize

class RFCHeader(UnicodeElement):
    """Temporary element holding the raw two-column header text of an RFC."""
class DocTitle(UnicodeElement):
    """Temporary element holding the document title paragraph of an RFC."""
class Pagebreak(CompoundElement):
    """Temporary marker for a page break; carries the running short title
    of the document (``shorttitle`` attribute) found on the broken page.
    Removed again by :meth:`RFC.cleanup_body`."""

class PreambleSection(CompoundElement):
    """An unnumbered section occurring before the first numbered section
    (e.g. "Abstract", "Status of This Memo"), rendered as a ``div``."""
    tagname = "div"

    def _get_classname(self):
        # CSS class is derived from the (subclass) name
        return type(self).__name__.lower()
    classname = property(_get_classname)

    def as_xhtml(self, uri):
        """Serialize to XHTML, adding RDFa attributes that mark this div
        as a bibo:DocumentPart titled by self.title."""
        element = super(PreambleSection, self).as_xhtml(uri)
        for attrname, attrvalue in (('property', 'dct:title'),
                                    ('content', self.title),
                                    ('typeof', 'bibo:DocumentPart')):
            element.set(attrname, attrvalue)
        return element

class RFC(DocumentRepository):
    """Document repository that downloads RFCs from the IETF and parses
    their plaintext format into structured XHTML+RDFa documents."""
    alias = "rfc"
    start_url             = "http://www.ietf.org/download/rfc-index.txt"
    document_url_template = "http://tools.ietf.org/rfc/rfc%(basefile)s.txt"
    # Raw string: "\w" in a plain string literal is an invalid escape
    # sequence (DeprecationWarning on modern Pythons, error in future).
    document_url_regex    = r"http://tools.ietf.org/rfc/rfc(?P<basefile>\w+).txt"
    downloaded_suffix     = ".txt"
    namespaces = ('rdf',  # always needed
                  'dct',  # title, identifier, etc (could be replaced by equiv bibo prop?)
                  'bibo', # Standard and DocumentPart classes, chapter prop
                  'xsd', # datatypes
                  ('rfc','http://example.org/ontology/rfc/')
                  )
                  
    @recordlastdownload
    def download(self, basefile=None):
        """Download either a single RFC (when *basefile* is given) or
        every RFC listed in the plaintext index at ``self.start_url``.

        Already-downloaded documents are skipped. When the server
        answers 404 for an RFC, an empty placeholder file is created so
        that it is not re-requested on subsequent runs.
        """
        if basefile and self.document_url_template:
            return self.download_single(basefile)
        res = requests.get(self.start_url)
        indextext = res.text
        reader = TextReader(ustring=indextext,linesep=TextReader.UNIX)  # see TextReader class
        iterator = reader.getiterator(reader.readparagraph)


        # Honour a cap on the number of downloads, from either the
        # repository config or the environment (useful for testing).
        if 'downloadmax' in self.config or 'FERENDA_DOWNLOADMAX' in os.environ:
            if 'downloadmax' in self.config:
                maxdoc = int(self.config.downloadmax)
            else:
                maxdoc = int(os.environ['FERENDA_DOWNLOADMAX'])
            self.log.info("Only downloading max %s documents" % maxdoc)
            links = islice(self.download_get_basefiles(iterator), maxdoc)
        else:
            links = self.download_get_basefiles(iterator)
        for (basefile,url) in links:
            try:
                if not os.path.exists(self.store.downloaded_path(basefile)):
                    self.download_single(basefile)
            except requests.exceptions.HTTPError as e:
                if e.response.status_code == 404:
                    # create an empty dummy file in order to
                    # avoid looking for it over and over again:
                    with open(self.store.downloaded_path(basefile), "w"):
                        pass

    def download_get_basefiles(self, source):
        """Yield ``(basefile, url)`` tuples for every issued RFC found in
        the index, newest first.

        *source* is an iterable of index-file paragraphs. A paragraph
        describing an RFC starts with a four-digit zero-padded number;
        entries marked "Not Issued." are skipped. The url element is
        always None (the URL is derived from document_url_template).
        """
        # iterate in reverse so that the newest RFCs come first
        for p in reversed(list(source)):
            if re.match(r"^(\d{4}) ", p):  # looks like a RFC number
                if "Not Issued." not in p:  # Skip RFC known to not exist
                    basefile = str(int(p[:4]))  # eg. '0822' -> '822'
                    yield (basefile, None)
        

    @staticmethod # so as to be easily called from command line
    def get_parser():
        """Create and configure a FSMParser that understands the
        plaintext layout of an RFC: the two-column header, the document
        title, unnumbered preamble sections, and numbered
        (sub(sub))sections containing paragraphs, preformatted blocks,
        BNF grammars and bulleted lists.

        Returns the ready-to-use FSMParser instance.
        """

        # recognizers, constructors and helpers are created as nested
        # ordinary functions, but could just as well be staticmethods
        # (or module-global functions)

        def is_rfcheader(parser, chunk=None):
            # The RFC header is a two-column block; every line that has
            # right-hand content is padded to the same total length.
            if not chunk:
                chunk = parser.reader.peek()
            (leftlines, rightlines, linelens) = _splitcolumns(chunk)
            # all rfc headers are at least 3 lines long
            if len(linelens) < 3:
                return False
            targetlen = linelens[0]
            for (idx, length) in enumerate(linelens):
                if rightlines[idx] == "" and length > 40:
                    return False
                elif rightlines[idx] != "" and length != targetlen:
                    return False
            return True

        # FIXME: use this in parse_header as well
        def _splitcolumns(chunk):
            # Split each line into a (left, right) pair on the first run
            # of three spaces, recording the raw length of every line.
            linelens = []
            leftlines = []
            rightlines = []
            for line in chunk.split("\n"):
                linelens.append(len(line))
                if "   " in line:
                    (left, right) = line.split("   ", 1)
                else:
                    (left, right) = line, ""
                leftlines.append(left)
                rightlines.append(right)
            return (leftlines, rightlines, linelens)

        def is_doctitle(parser, chunk=None):
            # Only reachable directly after the rfcheader, where the
            # next chunk is always the document title.
            return True

        def is_pagebreak(parser, chunk=None):
            if not chunk:
                chunk = parser.reader.peek()
            return ('\f' in chunk)

        def is_header(parser, chunk=None):
            if not chunk:
                chunk = parser.reader.peek()
            stripchunk = chunk.strip()
            # a header should be non-empty, be on a single line, not
            # end with "." and not start with an indent.
            if ((stripchunk != "") and
                (len(stripchunk.split("\n")) == 1) and
                (not stripchunk.endswith('.')) and
                (not chunk.startswith(' '))):
                return True

        def is_section(parser, chunk=None):
            (ordinal, title) = analyze_sectionstart(parser, chunk)
            return section_segments_count(ordinal) == 1

        def is_subsection(parser, chunk=None):
            (ordinal, title) = analyze_sectionstart(parser, chunk)
            return section_segments_count(ordinal) == 2

        def is_subsubsection(parser, chunk=None):
            (ordinal, title) = analyze_sectionstart(parser, chunk)
            return section_segments_count(ordinal) == 3

        def is_preformatted(parser, chunk=None):
            if not chunk:
                chunk = parser.reader.peek()
            # all paragraphs start with a three space indent -- start
            # by removing this
            stripped = "\n".join([x[3:] for x in chunk.split("\n")])
            # replace double spaces after end of sentences to avoid
            # false positives:
            stripped = stripped.replace(".  ", ". ")
            # If any double spaces left, probably preformatted text
            # (eg. tables etc). Same if several periods are present
            # (indicative of leaders in TOCs)
            return ("  " in stripped or
                    "...." in stripped or
                    ". . . " in stripped)

        def is_bnf(parser, chunk=None):
            if not chunk:
                chunk = parser.reader.peek()
            # BUGFIX: this return was previously nested inside the
            # "if not chunk" block above, so the recognizer silently
            # returned None whenever a chunk was passed in explicitly.
            return (is_preformatted(parser, chunk) and " = " in chunk)

        def is_paragraph(parser, chunk=None):
            # fallback recognizer: anything can be a paragraph
            return True

        def is_ul_listitem(parser, chunk=None):
            if not chunk:
                chunk = parser.reader.peek()
            return chunk.strip().startswith("o  ")

        def is_definition_title(parser, chunk=None):
            # looks like header but starts indented
            return False

        def is_definition(parser, chunk=None):
            # entire p is indented 6 spaces instead of 3. But if it
            # follows a ul li, probably continuation of that.
            return False

        # Constructors below consistently use the passed-in "parser"
        # argument instead of the closed-over "p" variable, so they do
        # not depend on closure state (FSMParser invokes them with
        # itself as the argument).

        def make_body(parser):
            return parser.make_children(Body())
        setattr(make_body, 'newstate', 'body')

        def make_preamble_section(parser):
            s = PreambleSection(title=parser.reader.next())
            return parser.make_children(s)
        setattr(make_preamble_section, 'newstate', 'preamble-section')

        def skip_pagebreak(parser):
            # The last line on a page carries the running short title in
            # its middle column; preserve it in a temporary Pagebreak
            # element (later removed by cleanup_body).
            chunk = parser.reader.next()
            lastline = chunk.split("\n")[-1]
            parts = re.split("  +", lastline)
            if len(parts) > 2:
                return Pagebreak(shorttitle=parts[1])
            else:
                return None

        def make_header(parser):
            chunk = parser.reader.next()
            h = Heading(chunk.strip())
            return h

        def make_paragraph(parser):
            chunk = parser.reader.next()
            # collapse all runs of whitespace into single spaces
            return Paragraph([" ".join(chunk.split())])

        def make_preformatted(parser):
            chunk = parser.reader.next()
            return Preformatted([chunk])

        def make_bnf(parser):
            chunk = parser.reader.next()
            return Preformatted([chunk], **{'class': 'bnf'})

        def make_section(parser):
            (secnumber, title) = analyze_sectionstart(parser, parser.reader.next())
            s = Section(ordinal=secnumber, title=title, uri=None, meta=None)
            return parser.make_children(s)
        setattr(make_section, 'newstate', 'section')

        def make_subsection(parser):
            (secnumber, title) = analyze_sectionstart(parser, parser.reader.next())
            s = Subsection(ordinal=secnumber, title=title, uri=None, meta=None)
            return parser.make_children(s)
        setattr(make_subsection, 'newstate', 'subsection')

        def make_subsubsection(parser):
            (secnumber, title) = analyze_sectionstart(parser, parser.reader.next())
            s = Subsubsection(ordinal=secnumber, title=title, uri=None, meta=None)
            return parser.make_children(s)
        setattr(make_subsubsection, 'newstate', 'subsubsection')

        def make_unordered_list(parser):
            (listtype, ordinal, separator, rest) = analyze_listitem(parser.reader.peek())
            ol = UnorderedList(type=listtype)
            ol.append(parser.make_child(make_listitem, "listitem"))
            return parser.make_children(ol)
        setattr(make_unordered_list, 'newstate', 'unorderedlist')

        def make_listitem(parser):
            chunk = parser.reader.next()
            (listtype, ordinal, separator, rest) = analyze_listitem(chunk)
            li = ListItem(ordinal=ordinal)
            li.append(rest)
            return parser.make_children(li)
        setattr(make_listitem, 'newstate', 'listitem')

        def make_rfcheader(parser):
            headerchunk = parser.reader.next()
            # the header block may span two paragraphs
            if is_rfcheader(parser):
                headerchunk += "\n" + parser.reader.next()
            return RFCHeader(headerchunk)

        def make_doctitle(parser):
            return DocTitle(parser.reader.next())

        # Some helpers for the above
        def section_segments_count(s):
            # "1.2.3" -> 3; None -> False (never equal to 1/2/3)
            return ((s is not None) and
                    len(list(filter(None, s.split(".")))))

        # Matches
        # "1 Blahonga" => ("1","Blahonga")
        # "1.2.3. This is a subsubsection" => ("1.2.3", "This is a subsection")
        re_sectionstart = re.compile(r"^(\d[\.\d]+) +(.*[^\.])$").match
        def analyze_sectionstart(parser, chunk=None):
            # Returns (ordinal-sans-trailing-dot, title), or (None, chunk)
            # when the chunk does not start a section.
            if not chunk:
                chunk = parser.reader.peek()
            m = re_sectionstart(chunk)
            if m:
                return (m.group(1).rstrip("."), m.group(2))
            else:
                return (None, chunk)

        def analyze_listitem(chunk):
            # returns: same as list-style-type in CSS2.1, sans
            # 'georgian', 'armenian' and 'greek', plus 'dashed'
            listtype = ordinal = separator = rest = None

            # FIXME: only unordered "o "-bulleted lists are handled;
            # ordered decimal/roman/alpha lists still need patterns
            # tightened to RFC conventions before being enabled.
            if chunk.startswith("   o  "):
                return ("disc", None, None, chunk[6:])

            return (listtype, ordinal, separator, chunk)  # None * 3

        p = FSMParser()

        p.set_recognizers(is_pagebreak,
                          is_rfcheader,
                          is_doctitle,
                          is_section,
                          is_subsection,
                          is_subsubsection,
                          is_header,
                          is_ul_listitem,
                          is_preformatted,
                          is_definition_title,
                          is_definition,
                          is_paragraph)
        # start_state: "body" or "rfcheader", then "title", then
        # "preamble" (consisting of preamblesections that has title
        # (eg "Abstract", "Status of This Memo" + content), then "section".
        commonstates = ("section", "subsection", "subsubsection")
        p.set_transitions({("body", is_rfcheader): (make_rfcheader, "doctitle"),
                           ("doctitle", is_doctitle): (make_doctitle, "preamble"),
                           ("preamble", is_header): (make_preamble_section, "preamble-section"),
                           ("preamble-section", is_paragraph): (make_paragraph, None),
                           ("preamble-section", is_header): (False, None),
                           ("preamble-section", is_pagebreak): (skip_pagebreak, None),
                           ("preamble-section", is_section): (False, "after-preamble"),
                           ("after-preamble", is_section): (make_section, "section"),
                           ("section", is_subsection): (make_subsection, "subsection"),
                           ("section", is_section): (False, None),
                           ("subsection", is_subsubsection): (make_subsubsection, "subsubsection"),
                           ("subsection", is_subsection): (False, None),
                           ("subsection", is_section): (False, None),
                           ("subsubsection", is_subsubsection): (False, None),
                           ("subsubsection", is_subsection): (False, None),
                           ("subsubsection", is_section): (False, None),
                           (commonstates, is_ul_listitem): (make_unordered_list, "ul-list"),
                           ("ul-list", is_ul_listitem): (make_listitem, "listitem"),
                           ("ul-list", is_paragraph): (False, None),
                           ("listitem", is_paragraph): (False, None),
                           (commonstates, is_bnf): (make_bnf, None),
                           (commonstates, is_preformatted): (make_preformatted, None),
                           (commonstates, is_paragraph): (make_paragraph, None),
                           (commonstates, is_pagebreak): (skip_pagebreak, None),
                           })
        p.initial_state = "body"
        p.initial_constructor = make_body
        return p
                           
        
    @managedparsing
    def parse(self, doc):
        """Parse the downloaded plaintext RFC into ``doc.body`` (element
        tree) and ``doc.meta`` (RDF metadata), setting ``doc.uri`` from
        the RFC number found in the document header.

        NOTE(review): pyparsing is imported locally below -- it is a
        third-party runtime dependency of this method.
        """

        reader = TextReader(self.store.downloaded_path(doc.basefile),
                            linesep=TextReader.UNIX)
        # Some more preprocessing: Remove the faux-bold formatting
        # used in some RFCs (using repetitions of characters
        # interleaved with backspace control sequences). Note: that
        # is '\b' as in backspace, not r'\b' as in word boundary
        # docstring = re.sub('.\b','',docstring)
        cleanparagraphs = (re.sub('.\b','',x) for x in
                           reader.getiterator(reader.readparagraph))

        parser = self.get_parser()
        self.config.fsmdebug = 'FERENDA_FSMDEBUG' in os.environ
        parser.debug = self.config.fsmdebug
        doc.body = parser.parse(cleanparagraphs)

        # The first two parsed chunks are always the raw header block
        # and the document title, in that order.
        header = doc.body.pop(0) # body.findByClass(RFCHeader)
        title  = " ".join(doc.body.pop(0).split()) # body.findByClass(DocHeader)
        # Drop the table of contents; removing during iteration is safe
        # here because we break out immediately afterwards.
        for part in doc.body:
            if isinstance(part,PreambleSection) and part.title == "Table of Contents":
                doc.body.remove(part)
                break
        
        # create (RDF) metadata for document Note: The provided
        # basefile may be incorrect -- let whatever is in the header
        # override
        realid = self.get_rfc_num(header)
        doc.uri = self.canonical_uri(realid)
        desc = Describer(doc.meta, doc.uri)
        desc.rdftype(self.ns['bibo'].Standard)
        desc.value(self.ns['dct'].title, title, lang="en")
        self.parse_header(header,desc)
        doc.lang = "en"
        
        # process body - remove the temporary Pagebreak objects, after
        # having extracted the shortTitle found in them
        shorttitle = self.cleanup_body(doc.body)
        if shorttitle and (desc.getvalue(self.ns['dct'].title) != shorttitle):
            desc.value(self.ns['bibo'].shortTitle, shorttitle, lang="en")
        
        # process body - add good metadata
        from pyparsing import Word,alphanums
        # a bibliographic reference looks like "[RFC2119]"; link each
        # occurrence to the corresponding in-document anchor
        bibref_cite = ("[" + Word(alphanums).setResultsName("ref") + "]").setResultsName("bibref")
        citparser = CitationParser(bibref_cite)
        citparser.set_formatter(URIFormatter(("bibref", lambda p: "#bib-%(ref)s" % p),
                                             ))
        doc.body = citparser.parse_recursive(doc.body)
        self.decorate_bodyparts(doc.body,doc.uri)

    def decorate_bodyparts(self, part, baseuri):
        """Recursively attach a URI and RDF metadata to every numbered
        (sub(sub))section in the element tree rooted at *part*.

        Plain text nodes carry no metadata and terminate the recursion.
        """
        if isinstance(part, six.text_type):
            return
        if isinstance(part, (Section, Subsection, Subsubsection)):
            part.uri = "%s#S%s" % (baseuri, part.ordinal)
            part.meta = self.make_graph()
            desc = Describer(part.meta, part.uri)
            desc.rdftype(self.ns['bibo'].DocumentPart)
            desc.value(self.ns['dct'].title, Literal(part.title, lang="en"))
            desc.value(self.ns['bibo'].chapter, part.ordinal)
            # desc.value(self.ns['dct'].isPartOf, part.parent.uri) # implied
        for child in part:
            self.decorate_bodyparts(child, baseuri)

    def cleanup_body(self, part):
        """Recursively strip the temporary Pagebreak elements out of the
        tree rooted at *part*, returning the first short title found in
        any of them (or None)."""
        shorttitle = None
        kept = []  # children to retain, i.e. everything but Pagebreaks
        for child in part:
            if isinstance(child, Pagebreak):
                shorttitle = child.shorttitle
                continue
            if not isinstance(child, six.text_type):
                nested = self.cleanup_body(child)
                # keep the earliest short title seen so far
                if shorttitle is None:
                    shorttitle = nested
            kept.append(child)
        part[:] = kept
        return shorttitle
    
    def get_rfc_num(self, header):
        """Return the RFC number found in the left-hand column of the raw
        *header* block.

        Raises ParseError when no "Request for Comments" line exists.
        """
        lines = header.split("\n")
        left = [x.split("   ", 1)[0].strip() for x in lines]
        for line in left[1:]:
            # guard on the actual separator (": ", not just ":") so the
            # unpack below cannot raise ValueError
            if ": " not in line:
                continue
            # maxsplit=1: values may themselves contain ": "
            (key, val) = line.split(": ", 1)
            if key == "Request for Comments":
                return val
        raise ParseError("Couldn't find RFC number in header")
            
        
    def parse_header(self, header, desc):
        """Parse the raw two-column *header* block of an RFC and record
        its fields as RDF metadata through the Describer *desc*.

        Left column: publisher (first line) followed by key-value pairs
        (RFC number, category, updates/obsoletes relations, etc).
        Right column: author names, issue date and affiliations.
        """
        # split header in left-hand and right-hand side, and line by line
        lines = header.split("\n")
        left = [x.split("   ", 1)[0].strip() for x in lines]
        right = [x.split("   ", 1)[1].strip() for x in lines if "   " in x]
        # first line of lefthand side is publishing organization (?)
        desc.value(self.ns['dct'].publisher, left[0])
        # following lefthand side are key-value headers
        for line in left[1:]:
            if line.strip() == "":
                continue
            if ": " not in line:
                self.log.warning("Cannot treat %r as a key-value header" % line)
                continue
            # maxsplit=1: values may themselves contain ": "
            (key, value) = line.split(": ", 1)
            if key == "Request for Comments":
                desc.value(self.ns['dct'].identifier, "RFC %s" % value)
            elif key == "Category":
                desc.value(self.ns['dct'].category, value)
            elif key == "ISSN":
                desc.value(self.ns['dct'].issn, value)
            elif key == "Updates":
                for valuepart in value.split(", "):
                    uri = self.canonical_uri(valuepart)
                    desc.rel(self.ns['dct'].updates, uri)
            elif key == "Obsoletes":
                # BUGFIX: an obsoleted RFC is *replaced* by this one;
                # this branch previously recorded dct:updates (a
                # copy-paste of the "Updates" branch above)
                for valuepart in value.split(", "):
                    uri = self.canonical_uri(valuepart)
                    desc.rel(self.ns['dct'].replaces, uri)
            elif key == "BCP":
                desc.value(self.ns['rfc'].BCP, value)
            elif key == "STD":
                desc.value(self.ns['rfc'].STD, value)
            elif key == "FYI":
                desc.value(self.ns['rfc'].FYI, value)
            else:
                self.log.warning("Unknown header key %s (value %s)" % (key, value))

        # For right hand side, any line beginning with a single letter
        # followed by '. ' is probably a name
        for line in right:
            if re.match(r"[A-Z]\. ", line):
                desc.value(self.ns['dct'].creator, line)
            elif re.match(r"\w+ \d{4}$", line):
                # NOTE: this requires english locale!
                dt = datetime.strptime(line, "%B %Y")
                d = date(dt.year, dt.month, dt.day)
                desc.value(self.ns['dct'].issued, d)
            else:
                # company affiliation - include that separate from
                # personal author identity
                desc.value(self.ns['dct'].rightsHolder, line)
        
