#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
import tempfile
import shutil
import time
import json
import codecs
import collections
import filecmp
from io import BytesIO, StringIO
from difflib import unified_diff
try:
    from unittest.mock import Mock, patch
except ImportError:
    from mock import Mock, patch
if sys.version_info < (2, 7, 0):
    import unittest2 as unittest
else:
    import unittest

import six
from six import text_type as str
from six import binary_type as bytes

import rdflib
from rdflib.compare import graph_diff
from rdflib.util import guess_format
from lxml.doctestcompare import LXMLOutputChecker
from lxml import etree


from ferenda import DocumentRepository
from ferenda import TextReader
from ferenda import elements
from ferenda import util

class FerendaTestCase(object):
    """Convenience mixin with extra assertEqual-style methods for comparing
    RDF graphs, XML documents and directory trees.

    Meant to be mixed into a :py:class:`unittest.TestCase` subclass -- the
    methods rely on ``self.fail`` and ``self.assertLessEqual`` etc. being
    provided by TestCase."""
    # FIXME: Some of these should (at least optionally) be registered
    # with TestCase.assertEqual through .addTypeEqualityFunc, but some
    # (eg. assertEqualDirs) have non-unique types

    def assertEqualGraphs(self, want, got, exact=True):
        """Assert that two RDF graphs contain the same triples.

        :param want: expected graph (an rdflib.Graph, or the filename of a
                     serialized graph in any format guess_format recognizes)
        :param got: actual graph (an rdflib.Graph, or a filename)
        :param exact: if True, also fail when ``got`` contains triples that
                      are not present in ``want``
        """
        def _loadgraph(filename):
            g = rdflib.Graph()
            g.parse(filename, format=guess_format(filename))
            return g

        if not isinstance(want, rdflib.Graph):
            want = _loadgraph(want)
        if not isinstance(got, rdflib.Graph):
            got = _loadgraph(got)

        (in_both, in_first, in_second) = graph_diff(want, got)
        msg = ""
        # missing triples are listed as "-" lines, unexpected ones as "+"
        # lines (unexpected ones are shown even in non-exact mode, as
        # context, when some expected triples are missing)
        if in_first:
            for (s, p, o) in sorted(in_first, key=lambda t: (t[0], t[1], t[2])):
                msg += "- %s %s %s\n" % (s.n3(), p.n3(), o.n3())
        if (exact and in_second) or in_first:
            for (s, p, o) in sorted(in_second, key=lambda t: (t[0], t[1], t[2])):
                msg += "+ %s %s %s\n" % (s.n3(), p.n3(), o.n3())
        if ((len(in_first) > 0) or (len(in_second) > 0 and exact)):
            if len(in_first) > 0:
                msg = "%s expected triples were not found\n" % len(in_first) + msg
            if len(in_second) > 0:
                msg = "%s unexpected triples were found\n" % len(in_second) + msg
            msg = "%r != %r\n" % (want, got) + msg
            self.fail(msg)

    def assertAlmostEqualDatetime(self, datetime1, datetime2, delta=1):
        """Assert that two datetime objects differ by at most ``delta``
        seconds (sub-second differences are ignored, since timetuple()
        has whole-second resolution)."""
        time1 = time.mktime(datetime1.timetuple())
        time2 = time.mktime(datetime2.timetuple())
        absdiff = abs(time1 - time2)
        self.assertLessEqual(absdiff, delta, "Difference between %s and %s "
                             "is %s seconds which is NOT almost equal" %
                             (datetime1.isoformat(), datetime2.isoformat(),
                              absdiff))

    def assertEqualXML(self, want, got):
        """Assert that two XML documents are semantically equivalent
        (ignoring leading/trailing whitespace in text and tail nodes).

        :param want: expected document (str, bytes or lxml Element)
        :param got: actual document (str, bytes or lxml Element)
        :raises AssertionError: with a unified diff of the canonicalized
                                documents plus the specific mismatches
        """
        # Adapted from formencode, https://bitbucket.org/ianb/formencode/
        def xml_compare(want, got, reporter):
            # recursively compare two elements; report first mismatch found
            if want.tag != got.tag:
                reporter("Tags do not match: 'want': %s, 'got': %s" % (want.tag, got.tag))
                return False
            for name, value in want.attrib.items():
                if got.attrib.get(name) != value:
                    reporter("Attributes do not match: 'want': %s=%r, 'got': %s=%r"
                             % (name, value, name, got.attrib.get(name)))
                    return False
            for name in got.attrib.keys():
                if name not in want.attrib:
                    reporter("'got' has an attribute 'want' is missing: %s"
                             % name)
                    return False
            if not text_compare(want.text, got.text):
                reporter("text: 'want': %r, 'got': %r" % (want.text, got.text))
                return False
            if not text_compare(want.tail, got.tail):
                reporter("tail: 'want': %r != 'got': %r" % (want.tail, got.tail))
                return False
            cl1 = want.getchildren()
            cl2 = got.getchildren()
            if len(cl1) != len(cl2):
                reporter("children length differs, 'want': %i, 'got': %i"
                         % (len(cl1), len(cl2)))
                return False
            i = 0
            for c1, c2 in zip(cl1, cl2):
                i += 1
                if not xml_compare(c1, c2, reporter=reporter):
                    reporter('children %i do not match: %s'
                             % (i, c1.tag))
                    return False
            return True

        def text_compare(want, got):
            # None and empty/whitespace-only strings compare equal
            if not want and not got:
                return True
            return (want or '').strip() == (got or '').strip()

        def treeify(something):
            # convert str/bytes/Element input into an lxml ElementTree,
            # so that getroot() and write_c14n() work uniformly below
            if isinstance(something, str):
                # note: str is six.text_type at module level
                fp = BytesIO(something.encode('utf-8'))
                return etree.parse(fp)
            elif isinstance(something, bytes):
                fp = BytesIO(something)
                return etree.parse(fp)
            elif isinstance(something, etree._Element):
                # BUGFIX: this branch used to test isinstance(want, ...)
                # (the closed-over argument) instead of `something`, and
                # returned the bare element, which lacks getroot()
                return etree.ElementTree(something)
            else:
                raise ValueError("Can't convert a %s into an ElementTree" % type(something))

        def c14nize(tree):
            # canonicalize (C14N) the tree and return it as a text string
            tmp = BytesIO()
            tree.write_c14n(tmp)
            return tmp.getvalue().decode('utf-8')

        errors = []
        want_tree = treeify(want)
        got_tree = treeify(got)
        xml_compare(want_tree.getroot(),
                    got_tree.getroot(),
                    errors.append)

        if errors:
            want_lines = [x + "\n" for x in c14nize(want_tree).split("\n")]
            got_lines = [x + "\n" for x in c14nize(got_tree).split("\n")]
            diff = unified_diff(want_lines, got_lines, "want.xml", "got.xml")
            msg = "".join(diff) + "\n\nERRORS:" + "\n".join(errors)
            raise AssertionError(msg)

    def assertEqualDirs(self, want, got, suffix=None, filterdir="entries"):
        """Assert that two directory trees contain the same files with the
        same contents.

        :param want: expected directory
        :param got: actual directory
        :param suffix: if given, only compare files with this suffix
        :param filterdir: name of a subdirectory to exclude from comparison
        """
        wantfiles = [x[len(want) + 1:] for x in util.list_dirs(want, suffix) if not x.startswith(want+os.sep+filterdir)]
        gotfiles = [x[len(got) + 1:] for x in util.list_dirs(got, suffix) if not x.startswith(got+os.sep+filterdir)]
        self.maxDiff = None
        self.assertEqual(wantfiles, gotfiles)  # or assertIn?
        for f in gotfiles:
            self.assertTrue(filecmp.cmp(os.path.join(want, f),
                                        os.path.join(got, f),
                                        shallow=False))

class RepoTester(unittest.TestCase, FerendaTestCase):
    """Base class for testing a DocumentRepository subclass against stored
    test fixtures (download specs, downloaded documents, distilled RDF and
    parsed XHTML)."""
    # A subclass must override these two
    repoclass = DocumentRepository  # the repository class under test
    docroot = '/tmp'                # directory containing the test fixtures

    def setUp(self):
        # each test gets a fresh datadir and repo instance
        self.datadir = tempfile.mkdtemp()
        self.repo = self.repoclass(datadir=self.datadir)

    def tearDown(self):
        shutil.rmtree(self.datadir)

    def filename_to_basefile(self, filename):
        """Converts a test filename to a basefile. Default implementation
           simply returns a hard-coded basefile.
        
        :param filename: The test file
        :type filename: str
        :returns: Corresponding basefile
        :rtype: str

        """
        return "1"

    def download_test(self, specfile):
        """Run a download test: patch requests.get to serve canned responses
        described by the JSON *specfile*, run self.repo.download(), then
        compare the downloaded files with the expected results."""

        def my_get(url, **kwargs):
            # fake requests.get: serve the local file that the spec maps
            # this url to
            urlspec = spec[url]
            if isinstance(urlspec, str):
                urlspec = {'file': urlspec}
            if 'charset' not in urlspec:
                urlspec['charset'] = 'utf-8'
            url_location = os.path.join(os.path.dirname(specfile),
                                        urlspec['file'])
            res = Mock()
            # load up both .text and .content properties
            with codecs.open(url_location, "r", encoding=urlspec['charset']) as fp:
                res.text = fp.read()
            with open(url_location, "rb") as fp:
                res.content = fp.read()
            res.headers = collections.defaultdict(lambda: None)
            res.headers['X-These-Headers-Are'] = 'Faked'
            res.status_code = 200
            return res
        with open(specfile) as fp:
            spec = json.load(fp)
        with patch('requests.get', side_effect=my_get):
            self.repo.download()

        # organize a temporary copy of files that we can compare our results to
        wantdir = "%s/%s-want" % (self.datadir, self.repoclass.alias)
        for url in spec:
            if "expect" in spec[url]:
                sourcefile = os.path.join(os.path.dirname(specfile),
                                          spec[url]['file'])
                wantfile = "%s/%s" % (wantdir, spec[url]['expect'])
                util.copy_if_different(sourcefile, wantfile)

        self.assertEqualDirs(wantdir,
                             "%s/%s" % (self.datadir,
                                        self.repoclass.alias))

    def distill_test(self, downloaded_file, rdf_file, docroot):
        """Run a distill test: parse *downloaded_file* and compare the
        distilled RDF metadata with the graph in *rdf_file* (non-exact:
        extra distilled triples are allowed)."""
        try:
            prefixlen = len(docroot+"/downloaded/")
            suffixlen = len(self.repo.store.downloaded_suffix)
            pathfrag  = downloaded_file[prefixlen:-suffixlen]
            basefile = self.repo.store.pathfrag_to_basefile(pathfrag)
        except Exception:
            # BUGFIX: was a bare except:, which also swallowed
            # KeyboardInterrupt/SystemExit
            basefile = self.filename_to_basefile(downloaded_file)
        with patch('ferenda.DocumentStore.downloaded_path',
                   return_value=downloaded_file):
            self.repo.parse(basefile)
        self.assertEqualGraphs(rdf_file,
                               self.repo.store.distilled_path(basefile),
                               exact=False)

    def parse_test(self, downloaded_file, xhtml_file, docroot):
        """Run a parse test: parse *downloaded_file* and compare the
        resulting XHTML to *xhtml_file*. If the FERENDA_SET_TESTFILES
        environment variable is set, overwrite the expected file with the
        actual result instead of comparing."""
        # patch method so we control where the downloaded doc is
        # loaded from.
        basefile = self.filename_to_basefile(downloaded_file)
        with patch('ferenda.DocumentStore.downloaded_path',
                   return_value=downloaded_file):
            self.repo.parse(basefile)
        if 'FERENDA_SET_TESTFILES' in os.environ:
            print("Overwriting %s with result of parse(%s)" % (xhtml_file, basefile))
            util.robust_rename(xhtml_file, xhtml_file+"~")
            shutil.copy2(self.repo.store.parsed_path(basefile), xhtml_file)
            return
        self.assertEqualXML(util.readfile(xhtml_file),
                            util.readfile(self.repo.store.parsed_path(basefile)))

    # for win32 compatibility and simple test case code
    def p(self, path, prepend_datadir=True):
        """Return *path* (optionally prepended with self.datadir), using
        the native directory separator."""
        if prepend_datadir:
            path = self.datadir + "/" + path
        return path.replace('/', '\\') if os.sep == '\\' else path


def parametrize(cls, template_method, name, params, wrapper=None):
    """Attach a test method called *name* to *cls* which invokes
    *template_method* with *params*, optionally decorated by *wrapper*
    (a unittest decorator like skip or expectedFailure)."""

    def test_method(self):
        template_method(self, *params)

    # py2 compat: name is a unicode object, func.__name__ must be a str(?)
    # (note that str/bytes are rebound to six text_type/binary_type here)
    test_method.__name__ = name if six.PY3 else bytes(name)
    setattr(cls, name, wrapper(test_method) if wrapper else test_method)


def file_parametrize(cls, directory, suffix, filter=None, wrapper=None):
    """Create one test method on *cls* for each file in *directory* ending
    in *suffix*, skipping files for which *filter* returns true.

    Call with any class that subclasses unittest.TestCase and which has
    a parametric_test method, like so::

        class MyTest(unittest.TestCase):
            def parametric_test(self, filename):
                self.assertTrue(os.path.exists(filename))

        from ferenda.testutil import file_parametrize
        file_parametrize(Parse, "test/files/legaluri", ".txt")
    """
    for filename in os.listdir(directory):
        if not filename.endswith(suffix):
            continue
        if filter and filter(filename):
            continue
        # derive the test name from the filename, e.g. "foo-bar.txt" ->
        # "test_foo_bar"
        testname = "test_" + filename[:-len(suffix)].replace("-", "_")
        parametrize(cls, cls.parametric_test, testname,
                    (directory + os.path.sep + filename,), wrapper)


def parametrize_repotester(cls):
    """Given a RepoTester subclass *cls*, create download, distill and
    parse test methods from the fixture files under cls.docroot."""
    docroot = cls.docroot
    # 1. download tests: one per JSON spec file in source/
    for fname in os.listdir(docroot + "/source"):
        if not fname.endswith(".json"):
            continue
        parametrize(cls, cls.download_test,
                    "test_download_" + fname[:-5].replace("-", "_"),
                    (docroot + "/source/" + fname,))
    # 2. parse tests: one distill + one parse test per downloaded document
    suf = cls.repoclass.downloaded_suffix
    basedir = docroot + "/downloaded"
    for fullpath in util.list_dirs(basedir, suffix=suf):
        relpath = fullpath[len(basedir) + 1:]
        if not relpath.endswith(suf):
            continue
        downloaded_file = "%s/downloaded/%s" % (docroot, relpath)
        # shld we use store.pathfrag_to_basefile?
        basefile = os.path.splitext(relpath)[0].replace("\\", "/")
        basetest = basefile.replace("-", "_").replace("/", "_")

        # Test 1: is rdf distilled correctly? (expected to fail when no
        # distilled want-file exists yet)
        rdf_file = "%s/distilled/%s.ttl" % (docroot, basefile)
        wrapper = None if os.path.exists(rdf_file) else unittest.expectedFailure
        parametrize(cls, cls.distill_test, "test_distill_" + basetest,
                    (downloaded_file, rdf_file, docroot), wrapper)

        # Test 2: is xhtml parsed correctly? (same expected-failure rule)
        xhtml_file = "%s/parsed/%s.xhtml" % (docroot, basefile)
        wrapper = None if os.path.exists(xhtml_file) else unittest.expectedFailure
        parametrize(cls, cls.parse_test, "test_parse_" + basetest,
                    (downloaded_file, xhtml_file, docroot), wrapper)


def testparser(testcase, parser, filename):
    """Run *parser* over the text in *filename* and compare the serialized
    result against the corresponding .xml want-file, using *testcase* for
    the assertion."""
    wantfilename = filename.replace(".txt", ".xml")
    have_wantfile = os.path.exists(wantfilename)
    if not have_wantfile:
        # no want-file yet: turn on parser debugging to help author one
        parser.debug = True

    reader = TextReader(filename, encoding="utf-8", linesep=TextReader.UNIX)
    tree = parser.parse(reader.getiterator(reader.readparagraph))
    testcase.maxDiff = 4096
    if not have_wantfile:
        raise AssertionError("Want file not found. Result of parse:\n" +
                             elements.serialize(tree))
    with codecs.open(wantfilename, encoding="utf-8") as fp:
        want = fp.read().strip()
    got = elements.serialize(tree).strip()
    testcase.assertEqualXML(want, got)
