# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""General  library of small utility functions"""

import os
import sys
import subprocess
import codecs
import shutil
import locale
import re
from tempfile import mktemp
from collections import namedtuple
import filecmp
import hashlib
import datetime
import posixpath
import six
import time
from contextlib import contextmanager
if six.PY3:
    from urllib.parse import urlsplit, urlunsplit
else:
    from urlparse import urlsplit, urlunsplit
from email.utils import parsedate_tz
try:
    from functools import cmp_to_key # new in 2.7
except ImportError:
    def cmp_to_key(mycmp):
        'Convert a cmp= function into a key= function'
        # Backport for Python < 2.7: wrap each element in a class whose
        # rich-comparison methods delegate to the old-style cmp
        # function, so the wrapper works as a sort key.
        class K(object):
            def __init__(self, obj, *args):
                self.obj = obj
            def __lt__(self, other):
                return mycmp(self.obj, other.obj) < 0
            def __gt__(self, other):
                return mycmp(self.obj, other.obj) > 0
            def __eq__(self, other):
                return mycmp(self.obj, other.obj) == 0
            def __le__(self, other):
                return mycmp(self.obj, other.obj) <= 0
            def __ge__(self, other):
                return mycmp(self.obj, other.obj) >= 0
            def __ne__(self, other):
                return mycmp(self.obj, other.obj) != 0
        return K
    
import bs4
import pkg_resources

from . import errors

# We should reorganize this, maybe in util.File, util.String, and so on...


# util.Namespaces
# Set up common namespaces and suitable prefixes for them
# Maps short prefixes to full namespace URIs for the vocabularies used
# throughout this codebase.
ns = {'dc': 'http://purl.org/dc/elements/1.1/',
      'dct': 'http://purl.org/dc/terms/',
      'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
      'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
      'skos': 'http://www.w3.org/2004/02/skos/core#',
      'xsd': 'http://www.w3.org/2001/XMLSchema#',
      'foaf': 'http://xmlns.com/foaf/0.1/',
      'owl': 'http://www.w3.org/2002/07/owl#',
      'xhv': 'http://www.w3.org/1999/xhtml/vocab#',
      'prov': 'http://www.w3.org/ns/prov-o/',
      'bibo': 'http://purl.org/ontology/bibo/',
      # FIXME: These non-general
      # vocabularies should not be
      # used in a general utility
      # module like this
      # 'rinfo': 'http://rinfo.lagrummet.se/taxo/2007/09/rinfo/pub#',
      # 'rpubl': 'http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#',
      # 'rinfoex': 'http://lagen.nu/terms#',
      # 'eurlex': 'http://lagen.nu/eurlex#',
      # 'ir': 'http://lagen.nu/informationretrieval#',
      }
"""A mapping of well-known prefixes and their corresponding namespaces."""

# util.File


def mkdir(newdir):
    """Create the directory *newdir*, including any missing parent
    directories. Does nothing if the directory already exists."""
    already_there = os.path.exists(newdir)
    if not already_there:
        os.makedirs(newdir)

# util.File


def ensure_dir(filename):
    """Make sure the directory that is to contain *filename* exists,
    creating it if necessary."""
    dirname = os.path.dirname(filename)
    if not dirname or os.path.exists(dirname):
        return
    try:
        mkdir(dirname)
    except OSError:
        # Benign race: a concurrent worker (when running
        # multiprocessing) may have created the directory already
        pass

# util.File


def robust_rename(old, new):
    """Rename *old* to *new* no matter what: an already-existing target
    is removed first, and the target directory is created if missing."""
    ensure_dir(new)
    if os.path.exists(new):
        os.unlink(new)
    try:
        # shutil.move works across file system boundaries, where a
        # plain os.rename may fail
        shutil.move(old, new)
    except IOError:
        # best-effort by design: there is no sensible recovery here
        pass

# util.File


def robust_remove(file):
    """Delete *file* if it exists; a no-op if it does not.

    Uses EAFP instead of the old exists()-then-unlink() sequence, so a
    concurrent process removing the file between the check and the
    deletion no longer causes a crash."""
    try:
        os.unlink(file)
    except OSError:
        # Swallow the error only if the file is in fact gone; real
        # problems (e.g. permissions on an existing file) still raise.
        if os.path.exists(file):
            raise

# relpath was introduced in py26, but that's the lowest ver we support
# -- no need for backport
# def relpath(path, start=os.curdir):

# like os.relpath, but for urls
def relurl(url, starturl):
    """Like os.path.relpath, but for URLs: express *url* relative to
    *starturl*. If the two URLs have different scheme/netloc parts, no
    relative form is possible and *url* is returned unchanged."""
    target = urlsplit(url)
    base = urlsplit(starturl)
    blank_tail = ('', '', '')
    target_domain = urlunsplit(target[:2] + blank_tail)
    base_domain = urlunsplit(base[:2] + blank_tail)
    if target_domain != base_domain:
        # different domain, no relative url possible
        return url
    rel = posixpath.relpath(target.path, posixpath.dirname(base.path))
    return urlunsplit(('', '', rel, target.query, target.fragment))

# util.Sort

# FIXME: Is this even used (since the cmp param has been removed in
# py3)? Change to a key function (or just use split_numalpha)?

def numcmp(x, y):
    """Old-style cmp for strings with embedded numbers: sorts
    ['1','10','1a','2'] as ['1', '1a', '2', '10']."""
    key_x = split_numalpha(x)
    key_y = split_numalpha(y)
    # replicate cmp(), which py3 removed: -1, 0 or 1
    if key_x < key_y:
        return -1
    if key_x > key_y:
        return 1
    return 0

# util.Sort
def split_numalpha(s):
    """Split a string into a list of alternating str/int segments, so
    that strings with embedded numbers sort numerically:
    '10 a §' => ['', 10, ' a §'].

    The result always starts with a (possibly empty) string segment, so
    any two results compare cleanly as str,int,str,int,...

    Bug fix: the empty string no longer raises IndexError (the old code
    unconditionally inspected s[0]); it now returns ['']."""
    if not s:
        return ['']
    res = []
    seg = ''
    digit = s[0].isdigit()
    for c in s:
        if c.isdigit() == digit:
            seg += c
        else:
            # mode switch (digits <-> non-digits): flush current segment
            res.append(int(seg) if seg.isdigit() else seg)
            seg = c
            digit = not digit
    res.append(int(seg) if seg.isdigit() else seg)
    if isinstance(res[0], int):
        res.insert(0, '')  # guarantee the leading-str invariant
    return res


def indent_xml_file(infile):
    """Pretty-print an existing XML file in-place by running it through
    the external ``tidy`` command."""
    scratch = mktemp()
    (ret, stdout, stderr) = runcmd(
        "tidy -q -xml -asxml -utf8 -w 95 -i %s > %s" % (infile, scratch))
    if ret != 0:
        raise errors.TransformError(stderr)
    # only touch infile when the tidied output actually differs
    replace_if_different(scratch, infile)


def indent_html_file(infile):
    """Pretty-print an existing XHTML file in-place by running it
    through the external ``tidy`` command."""
    scratch = mktemp()
    cmd = "tidy -q -asxhtml -utf8 -w 95 -i %s > %s" % (infile, scratch)
    print("indent_html_file: Running %s " % cmd)
    (ret, stdout, stderr) = runcmd(cmd)
    if ret != 0:
        raise errors.TransformError(stderr)
    # only touch infile when the tidied output actually differs
    replace_if_different(scratch, infile)

# util.XML


def tidy(tagsoup):
    """Run HTML Tidy over *tagsoup* (str or bytes) and return the
    cleaned-up XHTML as a unicode string.

    Bug fix: the input temp file is now opened in binary mode. The old
    code opened it in text mode and then wrote the utf-8-*encoded*
    bytes of a str input, which raises TypeError on Python 3. Both
    temp files are also handled via ``with`` so they are closed even
    on error."""
    tmpin = mktemp()
    tmpout = mktemp()
    with open(tmpin, "wb") as f:
        if isinstance(tagsoup, str):
            f.write(tagsoup.encode('utf-8'))
        else:
            f.write(tagsoup)

    cmd = "%s -q -asxhtml -utf8 %s > %s" % ("tidy", tmpin, tmpout)
    (ret, stdout, stderr) = runcmd(cmd)
    robust_remove(tmpin)

    with codecs.open(tmpout, encoding="utf-8") as f:
        result = f.read()
    robust_remove(tmpout)

    return result

# util.XML


def transform(stylesheet, infile, outfile, parameters={}, validate=True, xinclude=False, keep_unchanged=False):
    """Does a XSLT transform with the selected stylesheet. Afterwards, formats the resulting HTML tree and validates it"""
    # NOTE(review): `parameters` is a mutable default argument, but it
    # is only read here, never mutated, so it is harmless in practice.

    #parameters['infile'] = infile;
    #parameters['outfile'] = outfile;

    param_str = ""
    for p in list(parameters.keys()):
        # this double style quoting is needed for lawlist.xsl when
        # using the tagname parameter on macos. Maybe for other
        # reasons as well, I dunno
        param_str += "--param %s \"'%s'\" " % (p, parameters[p])

    if xinclude:
        # Pre-expand XIncludes into a temp file and transform that
        # instead of the original infile
        tmpfile = mktemp()
        cmdline = "xmllint --xinclude --encode utf-8 %s > %s" % (
            infile, tmpfile)
        # print cmdline
        (ret, stdout, stderr) = runcmd(cmdline)
        #if (ret != 0):
        #    raise errors.TransformError(stderr)
        infile = tmpfile

    if ' ' in infile:
        # quote paths containing spaces for the shell command below
        # NOTE(review): after this, `infile` contains literal quotes, so
        # the os.unlink(infile) in the xinclude branch below would fail
        # for such paths -- TODO confirm and fix
        infile = '"%s"' % infile
    tmpfile = mktemp()
    cmdline = "xsltproc --nonet %s %s %s > %s" % (
        param_str, stylesheet, infile, tmpfile)
    print(cmdline)
    (ret, stdout, stderr) = runcmd(cmdline)
    if (ret != 0):
        raise errors.TransformError(stderr)

    # If ret == 0, then whatever's printed on stderr are just warnings
    # (most likely 'failed to load external entity "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd"')
    #if stderr:
    #    print "Transformation error: %s" % stderr

    # Default behaviour is now to change the resulting file so that
    # timestamps reflect the fact that the transformed file is more
    # recent than the ingoing files.
    if keep_unchanged:
        replace_if_different(tmpfile, outfile)
    else:
        robust_rename(tmpfile, outfile)

    if os.path.exists(tmpfile):
        os.unlink(tmpfile)
    if xinclude:
        # remove the temporary XInclude-expanded copy
        os.unlink(infile)
    if validate:
        # NOTE(review): `basepath` is not defined anywhere in this
        # module as far as can be seen here -- unless it is provided
        # elsewhere in the file, this branch raises NameError. TODO:
        # confirm where basepath is supposed to come from.
        cmdline = "xmllint --noout --nonet --nowarning --dtdvalid %s/dtd/xhtml1-strict.dtd %s" % (basepath, outfile)
        (ret, stdout, stderr) = runcmd(cmdline)
        if (ret != 0):
            raise errors.ValidationError(stderr)

# util.Sort


def unique_list(*lists):
    """Return a list of the distinct items found across all given
    iterables (duplicates within and between the iterables removed)."""
    seen = {}
    for sequence in lists:
        for item in sequence:
            seen[item] = 1
    return list(seen)

# util.Process


def runcmd(cmdline, require_success=False, cwd=None):
    """Run *cmdline* through the shell and return a (returncode,
    stdout, stderr) tuple, with stdout/stderr decoded to unicode.

    If *require_success* is True, a non-zero exit status raises
    ExternalCommandError instead of being reported in the tuple."""
    # print("runcmd: %r" % cmdline)
    cmdline_needs_encoding = False  # not needed on mac, maybe on other platforms?
    if isinstance(cmdline, str) and cmdline_needs_encoding:
        # FIXME: How do we detect the proper encoding? Using
        # sys.stdout.encoding gives 'cp850' on windows, which is not
        # what xsltproc expects
        coding = 'utf-8' if sys.stdin.encoding == 'UTF-8' else 'iso-8859-1'
        cmdline = cmdline.encode(coding)

    p = subprocess.Popen(
        cmdline, cwd=cwd, shell=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    ret = p.returncode
    # print "runcmd '%s...': %s, '%s...', '%s...'" % (cmdline[:15], ret, stdout[:15], stderr[:15])
    # Pick a decode charset: the console's encoding when attached to a
    # terminal, the locale default otherwise (e.g. when redirected)
    if sys.stdout.encoding:
        enc = sys.stdout.encoding
    else:
        enc = locale.getpreferredencoding()

    # NOTE(review): on Python 3 communicate() returns bytes, so these
    # isinstance(..., str) branches only ever trigger on Python 2
    if isinstance(stdout, str):
        stdout = stdout.decode(enc)
    if isinstance(stderr, str):
        stderr = stderr.decode(enc)

    if (require_success and ret != 0):
        # FIXME: ExternalCommandError should have fields for cmd and
        # ret as well (and a sensible __str__ implementatiton)
        raise errors.ExternalCommandError(stderr)
    return (p.returncode, stdout, stderr)

# util.String


def normalize_space(string):
    """Collapse every run of whitespace in *string* into a single space
    and strip leading/trailing whitespace."""
    words = string.split()
    return ' '.join(words)

# util.File


def list_dirs(d, suffix=None, reverse=False):
    """A generator that works much like os.listdir, only recursively
    (and only yields files, not directories).

    Entries within each directory are yielded in "numalpha" order
    (embedded numbers sort numerically); directories are traversed
    breadth-first. If *suffix* is given, only files ending with it are
    yielded.

    Bug fix: the old code tested the *function object*
    ``os.path.isfile`` (always truthy) instead of calling it, so
    entries that are neither directories nor regular files (e.g. broken
    symlinks) could be yielded."""
    # inspired by http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/161542
    directories = [d]
    while directories:
        d = directories.pop()
        for f in sorted(os.listdir(d), key=split_numalpha, reverse=reverse):
            f = "%s%s%s" % (d, os.path.sep, f)
            if os.path.isdir(f):
                directories.insert(0, f)
            elif os.path.isfile(f):
                if suffix and not f.endswith(suffix):
                    continue
                else:
                    yield f


## util.String (or XML?)
def element_text(element):
    """Deprecated stub: extracting the plaintext of a BeautifulSoup
    element is no longer supported and always raises
    NotImplementedError."""
    raise NotImplementedError("Don't use this!")

def indent_et(elem, level=0):
    """Indent an ElementTree element in-place (CRLF line endings, two
    spaces per level), delegating child subtrees to indent_node. Only
    whitespace-only text/tail values are modified."""
    i = "\r\n" + level * "  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        for e in elem:
            indent_node(e, level + 1)
            if not e.tail or not e.tail.strip():
                e.tail = i + "  "
        # after the loop, e is the *last* child: its tail controls
        # where this element's closing tag lands
        if not e.tail or not e.tail.strip():
            e.tail = i
    else:
        # leaf element: give it a tail unless it already has real text
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i

# util.String (or XML?)


def indent_node(elem, level=0):
    """Recursively indent an ElementTree node in-place, using CRLF
    newlines and two spaces per nesting level. Only whitespace-only (or
    empty) text/tail values are touched."""
    pad = "\r\n" + "  " * level
    if len(elem) == 0:
        # leaf: give it a tail unless it already carries real text
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = pad
        return
    if not elem.text or not elem.text.strip():
        elem.text = pad + "  "
    for child in elem:
        indent_node(child, level + 1)
    # the last child's tail controls where this element's closing tag
    # lands (the original relied on for-loop variable shadowing here)
    if not child.tail or not child.tail.strip():
        child.tail = pad

# util.File


def replace_if_different(newfile, oldfile, archivefile=None):
    """Move *newfile* over *oldfile*, but only when their contents
    differ; an identical newfile is simply deleted, leaving oldfile
    (and its timestamps) untouched. If *archivefile* is given, the
    overwritten version is moved there first.

    Returns True when oldfile was created or replaced, False when the
    files were identical."""
    assert os.path.exists(newfile)
    old_exists = os.path.exists(oldfile)
    if old_exists and filecmp.cmp(newfile, oldfile):
        # contents identical -- discard newfile, keep old timestamps
        os.unlink(newfile)
        return False
    if old_exists and archivefile:
        # keep a copy of the version we are about to overwrite
        robust_rename(oldfile, archivefile)
    robust_rename(newfile, oldfile)
    return True

# util.File


def copy_if_different(src, dest):
    """Copy *src* to *dest* unless dest already exists with identical
    contents. Creates dest's directory when dest does not exist yet."""
    if os.path.exists(dest):
        if filecmp.cmp(src, dest):
            return  # identical -- nothing to do
        os.unlink(dest)
    else:
        ensure_dir(dest)
    shutil.copy2(src, dest)

# util.File


def outfile_is_newer(infiles, outfile):
    """Return True iff *outfile* exists and none of the existing files
    in *infiles* is newer than it (i.e. there is no need to regenerate
    outfile). Non-existing infiles are ignored."""
    if not os.path.exists(outfile):
        return False
    threshold = os.stat(outfile).st_mtime
    return not any(os.path.exists(infile) and
                   os.stat(infile).st_mtime > threshold
                   for infile in infiles)

# util.file


def link_or_copy(src, dest):
    """Make *dest* a relative symlink to *src* where symlinks are
    supported, falling back to a plain copy otherwise. Any existing
    dest is removed first and its directory created if needed.

    Bug fixes: the relative path is now computed with os.path.relpath
    (the bare name ``relpath`` was undefined here, raising NameError on
    every call), and symlink support is probed with hasattr() (on
    platforms without os.symlink, merely referencing the attribute
    raises AttributeError rather than evaluating falsy)."""
    ensure_dir(dest)
    if os.path.lexists(dest):
        os.unlink(dest)
    if hasattr(os, "symlink"):
        # The semantics of symlink are not identical to copy. The
        # source must be relative to the destination, not relative to
        # cwd at creation time.
        relsrc = os.path.relpath(src, os.path.dirname(dest))
        os.symlink(relsrc, dest)
    else:
        copy_if_different(src, dest)


# util.string
def ucfirst(string):
    """Return *string* with its first character uppercased; the rest of
    the string is left untouched. Safe on the empty string."""
    if not string:
        return string
    return string[0].upper() + string[1:]

# util.time
# From http://bugs.python.org/issue7584#msg96917


def rfc_3339_timestamp(dt):
    """Format a datetime as an RFC 3339 timestamp. A naive datetime
    gets the 'unknown local offset' suffix -00:00; an aware one gets
    its actual UTC offset."""
    # From http://bugs.python.org/issue7584#msg96917
    if dt.tzinfo is None:
        offset = "-00:00"
    else:
        raw = dt.strftime("%z")                    # e.g. "+0100"
        offset = "%s:%s" % (raw[:-2], raw[-2:])    # -> "+01:00"
    return "%s%s" % (dt.strftime("%Y-%m-%dT%H:%M:%S"), offset)

# more-or-less the same as a HTTP-date, eg "Mon, 4 Aug 1997 02:14:00
# EST". Converts to an UTC-localized (naive) datetime object (eg
# datetime.datetime(1997, 8, 4, 7, 14) since EST is 5 hours behind UTC)


def parse_rfc822_date(httpdate):
    """Parse an RFC 822/HTTP date string (e.g. "Mon, 4 Aug 1997
    02:14:00 EST") into a naive datetime normalized to UTC."""
    parts = parsedate_tz(httpdate)
    local = datetime.datetime(*parts[:7])
    # parts[9] is the UTC offset in seconds; subtracting it converts
    # the parsed wall-clock time to UTC
    return local - datetime.timedelta(seconds=parts[9])


# util.file
def readfile(filename, mode="r"):
    """Return the entire contents of *filename*, opened with *mode*."""
    with open(filename, mode=mode) as source:
        return source.read()

# util.file


def writefile(filename, contents, encoding="utf-8"):
    """Write *contents* to *filename* using *encoding*, creating any
    missing parent directories first."""
    ensure_dir(filename)
    fp = codecs.open(filename, "w", encoding=encoding)
    try:
        fp.write(contents)
    finally:
        fp.close()


# util.string
def extract_text(html, start, end, decode_entities=True, strip_tags=True):
    """Return the text between the first occurrence of *start* and the
    last occurrence of *end* in *html*, optionally resolving named
    character entities and removing simple tags. Raises ValueError if
    either marker is missing."""
    begin = html.index(start) + len(start)
    finish = html.rindex(end)
    text = html[begin:finish]
    if decode_entities:
        from html.entities import name2codepoint
        text = re.sub(r"&(\w+?);",
                      lambda m: chr(name2codepoint[m.group(1)]),
                      text)
    if strip_tags:
        # deliberately crude; see http://stackoverflow.com/a/1732454
        text = re.sub(r"</?\w+>", '', text)
    return text


# util.string
def md5sum(filename):
    """Return the hex md5 digest of *filename*'s contents.

    Improvement: the file is read in fixed-size chunks instead of being
    slurped into memory in one go, so arbitrarily large files can be
    hashed without a matching memory footprint. The digest is
    unchanged."""
    digest = hashlib.md5()
    with open(filename, 'rb') as fp:
        for chunk in iter(lambda: fp.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()

def merge_dict_recursive(base, other):
    """Merge *other* into *base* in-place and return base. Values from
    other win, except that when both sides hold a dict under the same
    key, those dicts are merged recursively."""
    for key, value in other.items():
        both_are_dicts = (isinstance(value, dict) and
                          key in base and
                          isinstance(base[key], dict))
        if both_are_dicts:
            base[key] = merge_dict_recursive(base[key], value)
        else:
            base[key] = value
    return base

def resource_extract(resource_name, outfile, params=None):
    """Extract the packaged resource *resource_name* (from the
    'ferenda' package) to *outfile*, optionally %-interpolating
    *params* into the resource text first.

    Fixes: the output is now written with an explicit utf-8 encoding,
    matching both the utf-8 decode above and writefile() elsewhere in
    this module (codecs.open without an encoding falls back to the
    locale default); the mutable default argument has been replaced
    with None (backward compatible, since an empty dict and None are
    both falsy here)."""
    fp = pkg_resources.resource_stream('ferenda', resource_name)
    resource = fp.read().decode('utf-8')
    if params:
        resource = resource % params
    ensure_dir(outfile)
    with codecs.open(outfile, "w", encoding="utf-8") as fp:
        fp.write(resource)

# http://stackoverflow.com/a/7142094
def print_open_fds():
    '''
    Print the open file descriptors for the current process.

    .. warning: will only work on UNIX-like os-es (requires lsof).
    '''
    # http://stackoverflow.com/a/7142094
    import os
    import subprocess

    output = subprocess.check_output(
        ["lsof", '-w', '-Ff', "-p", str(os.getpid())]).decode('utf-8')
    # lsof -Ff prints one field per line; "f<digits>" lines are the fds
    fds = [line for line in output.split('\n')
           if line and line[0] == 'f' and line[1:].isdigit()]
    print("Open file descriptors: " + ", ".join(fds))


# Copied from rdfextras.utils.pathutils
def uri_leaf(uri):
    """
    Get the "leaf" - fragment id or last segment - of a URI. Useful e.g. for
    getting a term from a "namespace like" URI.

    Returns None when the URI ends with a separator or contains no
    separator at all (i.e. when there is no non-empty leaf)."""
    # Copied from rdfextras.utils.pathutils
    for candidate_sep in ('#', '/', ':'):
        if uri.endswith(candidate_sep):
            # a trailing separator means there is no leaf at all
            break
        if candidate_sep in uri:
            leaf = uri.rsplit(candidate_sep)[-1]
            if leaf:
                return leaf

# context mgr that logs elapsed time. use like so:
#
# with util.logtime(log.debug, "Basefile %(basefile)s took %(elapsed).3f s", {'basefile':'foo'}):
#     do_stuff_that_takes_some_time()
#
# results in a call like log.debug("Basefile foo took 1.324 s")
@contextmanager
def logtime(method, format="The operation took %(elapsed).3f sec", values=None):
    """Context manager that measures the wall-clock time of its body,
    stores it under 'elapsed' in *values*, and calls *method* with the
    %-interpolated *format* string. Example::

        with logtime(log.debug,
                     "Basefile %(basefile)s took %(elapsed).3f s",
                     {'basefile': 'foo'}):
            do_stuff_that_takes_some_time()

    Bug fix: the old signature used the mutable default ``values={}``
    and then mutated it, so every call relying on the default shared
    (and accumulated into) one and the same dict. A caller-supplied
    dict is still updated in-place, as before."""
    if values is None:
        values = {}
    start = time.time()
    yield
    values['elapsed'] = time.time() - start
    method(format % values)

# Example code from http://www.diveintopython.org/
def from_roman(s):
    """convert Roman numeral to integer"""
    # Example code from http://www.diveintopython.org/
    # ordered largest-first so that subtractive pairs (CM, XC, IV, ...)
    # are consumed before their component letters
    numeral_values = (('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
                      ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
                      ('X', 10), ('IX', 9), ('V', 5), ('IV', 4),
                      ('I', 1))
    total = 0
    pos = 0
    for numeral, value in numeral_values:
        width = len(numeral)
        while s[pos:pos + width] == numeral:
            total += value
            pos += width
    return total
