"""
Akamu Harness for W3C SPARQL1.1 JSON/TSV/CSV result format tests
"""

import unittest,os,json,csv,sys, datetime
from pprint import pprint
from urllib2 import urlopen
from rdflib.Namespace import Namespace
from rdflib.Collection import Collection
from rdflib import RDF,RDFS,URIRef,Variable,BNode,Literal
from cStringIO import StringIO
from rdflib.Graph import Graph,ReadOnlyGraphAggregate
from rdflib.syntax.NamespaceManager import NamespaceManager
from rdflib.sparql.parser import parse
from rdflib.OWL import OWLNS
from rdflib.util import first
from amara.lib import U
from amara.lib.iri import uri_to_os_path
from akamu.util import enum
from akamu.protocol.sparql import RESULT_FORMAT, GetResultFormats

# Namespaces used for querying the W3C test manifests and for building
# the EARL conformance-report graph emitted at the end of a run.
DC          = Namespace('http://purl.org/dc/elements/1.1/')
MANIFEST    = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#')
QUERY       = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-query#')
SD          = Namespace('http://www.w3.org/ns/sparql-service-description#')
TEST        = Namespace('http://www.w3.org/2009/sparql/docs/tests/data-sparql11/csv-tsv-res/manifest#')
EARL        = Namespace('http://www.w3.org/ns/earl#')
# FOAF document identifying the asserter recorded in the EARL report.
MY_FOAF     = Namespace('http://metacognition.info/public_rdf/n3/foaf.ttl#')

# Result formats under test.  NOTE(review): GetTests() yields values from
# akamu's RESULT_FORMAT enum while test_generator() compares against this
# TEST_TYPES enum — confirm the two enums share identical member values.
TEST_TYPES = enum(CSV='CSV',TSV='TSV',JSON='JSON')

# Local directories (relative to the CWD) holding the W3C manifest.ttl
# files and their associated query/data/result fixtures.
TEST_DIRS = [
    u'json-res',
    u'csv-tsv-res',
]

# Test URIs to skip entirely.
SKIP = []

# Prefix bindings for MANIFEST_QUERY below.  'mf' and 'test' both map to
# the manifest namespace (test:CSVResultFormatTest lives there too).
nsMap = {
  u'rdfs' :RDFS.RDFSNS,
  u'rdf'  :RDF.RDFNS,
  u'mf'   :MANIFEST,
  u'test' :MANIFEST,
  u'qt'   :QUERY
}
# Extracts every CSV-result-format or query-evaluation test from a
# manifest graph: its URI, name, query file, data document, and the
# expected result document.
MANIFEST_QUERY = \
"""
SELECT ?test ?name ?queryFile ?rdfDoc ?result
WHERE {
  ?test a ?kind;
      mf:name ?name;
      mf:action [
        qt:query ?queryFile;
        qt:data  ?rdfDoc;
      ];
      mf:result ?result
  FILTER(
    ?kind = test:CSVResultFormatTest ||
    ?kind = test:QueryEvaluationTest
  )
} ORDER BY ?test """

# Seed triples for the EARL report: who ran the tests (foaf:Person) and
# what software was tested (doap:Project).
PERSON_AND_PROJECT =\
"""
@prefix myfoaf: <http://metacognition.info/public_rdf/n3/foaf.ttl#>.
@prefix doap: <http://usefulinc.com/ns/doap#>.
@prefix earl: <http://www.w3.org/ns/earl#>.
@prefix foaf: <http://xmlns.com/foaf/0.1/>.
@prefix rdf:  <http://www.w3.org/1999/02/22-rdf-syntax-ns#>.
@prefix dc:   <http://purl.org/dc/elements/1.1/>.
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
@prefix json: <http://www.w3.org/2009/sparql/docs/tests/data-sparql11/json-res/manifest#>.
@prefix csvtsv: <http://www.w3.org/2009/sparql/docs/tests/data-sparql11/csv-tsv-res/manifest#>.

myfoaf:chime
    a foaf:Person;
    foaf:homepage <http://metacognition.info>;
    foaf:name "Chimezie Ogbuji".

<http://code.google.com/p/akamu/>
    a               doap:Project;
    doap:name       "Akamu";
    doap:maintainer myfoaf:chime;
    doap:release
        [   a doap:Version; doap:name "Akamu" ]."""

# Module-level EARL report graph; test_generator() adds one assertion
# per executed test, and __main__ serializes it after the run.
test_graph = Graph().parse(StringIO(PERSON_AND_PROJECT),format='n3')

def GetTests():
    """
    Generate the W3C result-format tests described by the local manifests.

    Walks each directory in TEST_DIRS, parses its manifest.ttl, and runs
    MANIFEST_QUERY against it.  For every matching test yields a 6-tuple:

        (test URI, name, query file URI, RDF data URI,
         expected result URI, result format constant)

    The result format is inferred from the expected-result file's
    extension: '.srj' -> JSON, '.csv' -> CSV, anything else -> TSV.
    """
    for test_dir in TEST_DIRS:
        manifest_file = os.path.join(test_dir, 'manifest.ttl')
        # Close the manifest file deterministically instead of leaking the
        # handle until garbage collection (the original never closed it).
        with open(manifest_file) as manifest_src:
            manifestGraph = Graph().parse(manifest_src, format='n3')
        rt = manifestGraph.query(MANIFEST_QUERY,
                                 initNs=nsMap,
                                 DEBUG=False)
        for test, name, queryFile, rdfDoc, result in rt:
            # Hoist the extension computation (was evaluated twice).
            extension = result.split('.')[-1]
            if extension == 'srj':
                test_type = RESULT_FORMAT.JSON
            elif extension == 'csv':
                test_type = RESULT_FORMAT.CSV
            else:
                # Any other extension (expected: '.tsv') falls through to TSV.
                # NOTE(review): these RESULT_FORMAT values are later compared
                # against TEST_TYPES members in test_generator — confirm the
                # two enums agree.
                test_type = RESULT_FORMAT.TSV
            yield test, name, queryFile, rdfDoc, result, test_type


def castToTerm(node):
    """
    Convert a SPARQL XML-results binding element (an amara bindery node)
    into the corresponding rdflib term.

    <bnode>   -> BNode
    <uri>     -> URIRef
    <literal> -> Literal, honouring @datatype when present

    Raises NotImplementedError for any other element.
    """
    if node.xml_local == 'bnode':
        return BNode(U(node))
    elif node.xml_local == 'uri':
        return URIRef(U(node))
    elif node.xml_local == 'literal':
        if node.xml_select('string(@datatype)'):
            # Bug fix: the original called node.xpath(...) here, but amara
            # bindery nodes expose xml_select (as the guard above already
            # uses), so any datatyped literal raised AttributeError.
            dT = URIRef(U(node.xml_select('string(@datatype)')))
            return Literal(U(node), datatype=dT)
        else:
            return Literal(U(node))
    else:
        raise NotImplementedError()

def parseResults(sparqlRT):
    """
    Parse a SPARQL XML results document.

    Returns True/False for an ASK result, otherwise a list of
    {Variable: rdflib term} dictionaries, one per non-empty result row.
    """
    from amara import bindery
    doc = bindery.parse(
        sparqlRT,
        prefixes={u'sparql': u'http://www.w3.org/2005/sparql-results#'})
    # An ASK answer appears as a single sparql:boolean element.
    boolean_text = doc.xml_select('string(/sparql:sparql/sparql:boolean)')
    if boolean_text:
        return U(boolean_text) == u'true'
    rows = []
    for result in doc.xml_select(
            '/sparql:sparql/sparql:results/sparql:result'):
        row = {}
        for binding in result.binding:
            var = Variable(U(binding.name))
            row[var] = castToTerm(binding.xml_select('*')[0])
        if row:
            rows.append(row)
    return rows
        
class TestSequence(unittest.TestCase):
    """
    Empty TestCase shell.  The __main__ section attaches one generated
    test method per manifest entry (via setattr) before running the suite,
    so no fixture setup or teardown is required here.
    """
    verbose = False

    def setUp(self):
        pass

    def tearDown(self):
        pass

def normalize_delimited(src, bnodeIndices, delimiter='\t', quotechar='"'):
    """
    Parse a delimited (TSV/CSV) result document into a list of row tuples,
    blanking out cells known to hold blank-node labels so two documents can
    be compared without regard to bnode naming.

    src          -- raw delimited text (header row first)
    bnodeIndices -- container of (column, row) pairs, 0-based and relative
                    to the data rows (header excluded), marking bnode cells
    delimiter    -- field delimiter ('\\t' for TSV, ',' for CSV)
    quotechar    -- quote character for the csv reader

    Returns a list of tuples, one per row, header row included.
    """
    reader = csv.reader(StringIO(src),
                        delimiter=delimiter,
                        # Bug fix: the quotechar parameter was previously
                        # ignored in favour of a hard-coded '"'.
                        quotechar=quotechar,
                        quoting=csv.QUOTE_MINIMAL)
    normalized = []
    for rowIdx, row in enumerate(reader):
        # rowIdx - 1 maps file rows onto data-row coordinates: the header
        # row (rowIdx == 0) becomes -1 and therefore never matches.
        normalized.append(tuple(
            '' if (colIdx, rowIdx - 1) in bnodeIndices else cell
            for colIdx, cell in enumerate(row)))

    return normalized

def test_generator(test_uri, label, queryFile, rdfDoc, result, test_type, debug):
    """
    Build and return a unittest-style test method for one manifest entry.

    test_uri  -- URI identifying the test
    label     -- human-readable test name (mf:name)
    queryFile -- URL of the SPARQL query to execute
    rdfDoc    -- URL of the N3 data the query runs against
    result    -- URL/path of the expected result document
    test_type -- TEST_TYPES member selecting the comparison (JSON/TSV/CSV)
    debug     -- accepted but unused by the generated test

    The generated test also records a pass/fail EARL assertion in the
    module-level test_graph.
    """
    def test(self):
        print >> sys.stderr, test_uri, label
        query         = urlopen(queryFile).read()
        factGraph     = Graph().parse(urlopen(rdfDoc),format='n3')
        results       = factGraph.query(query)

        # Record (column, row) coordinates of every blank node in the
        # result rows so delimited comparisons can ignore bnode labels.
        bnodeIndices = []
        if results.selected is not None:
            for rowIdx,row in enumerate(results):
                for colIdx,entry in enumerate(row):
                    if isinstance(entry,BNode):
                        bnodeIndices.append((colIdx,rowIdx))

        # Serialize the result set in the format under test.
        actual_results = GetResultFormats(results,'../../../htdocs/xslt/',test_type)

        # NOTE(review): this duplicates the scan above — presumably because
        # GetResultFormats consumes or re-orders the result object; confirm
        # before consolidating the two loops.
        bnodeIndices = []
        if results.selected is not None:
            for rowIdx,row in enumerate(results):
                for colIdx,entry in enumerate(row):
                    if isinstance(entry,BNode):
                        bnodeIndices.append((colIdx,rowIdx))

        # Record an EARL assertion (asserter, subject, test, result) in the
        # module-level report graph; the outcome is added below.
        assertion     = BNode()
        result_node   = BNode()
        test_graph.add((result_node,RDF.type,EARL.TestResult))
        test_graph.add((result_node,DC['date'],Literal(datetime.date.today())))
        test_graph.add((assertion,RDF.type,EARL.Assertion))
        test_graph.add((assertion,EARL.assertedBy,MY_FOAF.chime))
        test_graph.add((assertion,
                        EARL.subject,
                        URIRef('http://code.google.com/p/akamu/')))
        test_graph.add((assertion,EARL.test,URIRef(test_uri)))
        test_graph.add((assertion,EARL.result,result_node))

        if test_type == TEST_TYPES.JSON:
            # JSON comparison: parse both documents and blank out bnode
            # values (their labels are not required to match).
            result_file = uri_to_os_path(result, attemptAbsolute=True)
            actual=json.load(
                StringIO(actual_results)
            )
            expected=json.load(open(result_file))
            for bindings in [actual,expected]:
                _results = bindings.get(u'results')
                if _results:
                    for binding in _results.get(u'bindings',[]):
                        for value_dict in list(binding.values()):
                            if value_dict[u'type'] == u'bnode':
                                value_dict[u'value'] = u''
            if actual != expected:
                print >> sys.stderr, results.selected
                print >> sys.stderr, actual
                print >> sys.stderr, expected
                test_graph.add((result_node,EARL.outcome,EARL['fail']))
            else:
                test_graph.add((result_node,EARL.outcome,EARL['pass']))
            self.failUnless(actual == expected,"JSON results don't match")
        elif test_type == TEST_TYPES.TSV:
            # TSV comparison via normalize_delimited (tab-delimited).
            result_file = uri_to_os_path(result, attemptAbsolute=True)
            expected = open(result_file).read()
            one   = normalize_delimited(actual_results,bnodeIndices)
            other = normalize_delimited(expected,bnodeIndices)
            passes = one == other
            if passes:
                test_graph.add((result_node,EARL.outcome,EARL['pass']))
            else:
                print >> sys.stderr, actual_results
                print >> sys.stderr, result_file
                print >> sys.stderr, expected
                print >> sys.stderr, query
                test_graph.add((result_node,EARL.outcome,EARL['fail']))
            self.failUnless(passes,"TSV results don't match")
        else:
            # CSV comparison via normalize_delimited (comma-delimited).
            result_file = uri_to_os_path(result, attemptAbsolute=True)
            expected = open(result_file).read()
            one   = normalize_delimited(actual_results,bnodeIndices,delimiter=',')
            other = normalize_delimited(expected,bnodeIndices,delimiter=',')
            passes = one == other
            if passes:
                test_graph.add((result_node,EARL.outcome,EARL['pass']))
            else:
                print >> sys.stderr, actual_results
                print >> sys.stderr, one
                print >> sys.stderr, result_file
                print >> sys.stderr, expected
                print >> sys.stderr, other
                print >> sys.stderr, query
                test_graph.add((result_node,EARL.outcome,EARL['fail']))
            self.failUnless(passes,"CSV results don't match")
    return test

if __name__ == '__main__':
    from optparse import OptionParser
    op = OptionParser('usage: %prog [options]')
    op.add_option('--profile',
                  action='store_true',
                  default=False,
      help = 'Whether or not to run a profile')
    op.add_option('--singleTest',
      help = 'The short name of the test to run')
    op.add_option('--debug','-v',
                  action='store_true',
                  default=False,
      help = 'Run the test in verbose mode')
    (options, facts) = op.parse_args()
    for test, name, queryFile, rdfDoc, result, test_type in GetTests():
        short_test = test.split('#')[-1]
        print >> sys.stderr, short_test, name, queryFile, rdfDoc, result, test_type
        print >> sys.stderr, test
        if test in SKIP or options.singleTest is not None and \
           options.singleTest != short_test:
            if test in SKIP:
                print >> sys.stderr, "\tSkipping (%s)"%test,SKIP[test]
        else:
            test_name = 'test_%s' % test
            testFn = test_generator(
                        test,
                        name,
                        queryFile,
                        rdfDoc,
                        result,
                        test_type,
                        options.debug)
            setattr(TestSequence, test_name, testFn)
    unittest.TextTestRunner(verbosity=5).run(
        unittest.makeSuite(TestSequence)
    )
    print test_graph.serialize(format='n3')