#!/usr/bin/env python

import lxml.html
from lxml.html import parse
import sys
import argparse
import re
import string

# Namespace URLs emitted as @prefix declarations in the N3 output.
skos_url = 'http://www.w3.org/2009/08/skos-reference/skos.rdf'
rdf_url = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
rdfs_url = 'http://www.w3.org/2000/01/rdf-schema#'

# Matches strings that start like an http(s) URL. Raw string so that '\w'
# is an explicit regex escape instead of relying on Python passing unknown
# string escapes through unchanged (a DeprecationWarning in newer Pythons).
url_pattern = re.compile(r'https?://\w')

empty_trans = string.maketrans('', '')

# Module-level option flags. debug and ignore_non_matches are overwritten
# from the parsed command line in parseCmdlineArgs; strict keeps its
# default here (no visible code in this file reassigns it).
strict = False
debug = False
ignore_non_matches = False

def cleanup_string(s):
	"""
	Encode a string as utf-8 and strip every newline character from it.

	Arguments:
		s: The string to be cleaned.
	Returns:
		A utf-8 encoded string with all newline characters removed.
	"""
	encoded = s.encode('utf-8')
	# Deleting '\n' via replace is equivalent to translating with an
	# identity table and '\n' as the deletechars argument.
	return encoded.replace('\n', '')

def skosify(resource, label, broaders, narrowers, rdftypes):
	"""
	Render one resource as a SKOS concept in RDF/N3 format.

	The emitted snippet contains the skos:Concept type declaration, any
	additional rdf:type statements, the skos:broader and skos:narrower
	relations and, as the closing statement, the skos:prefLabel. A
	resource without any broader relation is additionally typed as a
	skos:TopConcept.

	Arguments:
		resource: A string that is the subject resource or skos:Concept.
		label: A string that defines the skos:prefLabel.
		broaders: A list of resource strings for skos:broader relations.
		narrowers: A list of resource strings for skos:narrower relations.
		rdftypes: A list of strings with additional rdf:type relations.
	Returns:
		A string that represents the SKOS thesaurus in RDF/N3 format.
	"""
	parts = [resource]

	def emit(relation, value, suffix=';'):
		"""Format one 'relation value suffix' line and append it to parts."""
		value = cleanup_string(value)
		if url_pattern.match(value):
			value = '<%s>' % value
		parts.append('		%s %s %s \n' % (
			relation, value, suffix))

	def emit_each(relation, values):
		"""Emit one line per truthy entry of an optional value list."""
		for value in (values or []):
			if value:
				emit(relation, value)

	emit('a', 'skos:Concept')
	emit_each('a', rdftypes)

	if broaders:
		emit_each('skos:broader', broaders)
	else:
		# Without any broader relation the concept sits at the top level.
		emit('a', 'skos:TopConcept')

	emit_each('skos:narrower', narrowers)
	emit('skos:prefLabel', '"%s"' % label, suffix='.')

	return ''.join(parts)


def extractAndPrefixText(element, prefix):
	"""
	Convenience wrapper combining extractText and prefixText.

	Arguments:
		element: An ElementTree node whose text content is extracted.
		prefix: A string prepended to the extracted text.
	Returns:
		The prefixed element text, or None when there is no text.
	"""
	return prefixText(extractText(element), prefix)

def extractText(element):
	"""
	Extract the complete text content of an ElementTree node.

	Arguments:
		element: The ElementTree node, or None.
	Returns:
		The concatenated text of the node and its descendants, or None
		when the element itself is None.
	"""
	if element is None:
		return None
	return ''.join(element.itertext())

def prefixText(string, prefix):
	"""
	Prepend a prefix to a string.

	Arguments:
		string: The string to be prefixed; may be None or empty.
		prefix: The prefix; a falsy prefix is treated as the empty string.
	Returns:
		The concatenation of prefix and string, or None when the input
		string is None or empty.
	"""
	if string is None or len(string) == 0:
		return None
	return '%s%s' % (prefix or '', string)

def splitXpathAndRegex(string):
	"""
	Split an 'xpath#regex' command line value into its two parts.

	Arguments:
		string: The combined argument, or None.
	Returns:
		A (xpath, regex) tuple. regex is None when no '#' separator is
		present; both entries are None for a None or empty input.
	"""
	if not string:
		return None, None
	xpath, separator, regex = string.partition('#')
	if separator:
		return xpath, regex
	return xpath, None
def parseCmdlineArgs(argv):
	"""
	Parses a given list of strings and returns a dictionary according to the 
	expected command line arguments. It also checks whether all necessary arguments
	have been provided.
	Arguments:
		argv: A string that contains the command line arguments
	Returns:
		A dictionary containing the command line argument names and values.
	"""
	global debug
	global ignore_non_matches

	parser = argparse.ArgumentParser(description='Skosify XML/HTML document by providing XPaths to the resource, label, broader- and narrower relations')	
	
	parser.add_argument('-i', '--input', nargs=1, type=str, help='Relative or absolute path to a document or url to parse')
	parser.add_argument('-x', '--xpath', nargs=1, type=str, help='XPath to the base element')
	parser.add_argument('-r', '--resource', nargs=1, type=str, help='XPath to the resource that become skos concepts, relative to the base element')
	parser.add_argument('-b', '--broader', nargs=1, type=str, help='XPath to the resource broader relation, relative to the base element')
	parser.add_argument('-n', '--narrower', nargs=1, type=str, help='XPath to the resource narrower relations, relative to the base element')
	parser.add_argument('-l', '--label', nargs=1, type=str, help='XPath to the resource labels, relative to the base element')
	parser.add_argument('-t', '--addtype', nargs='+', type=str, help='Additional rdf types for the resource')
	parser.add_argument('-s', '--namespace', nargs=1, type=str, help='Namespace for the resources')
	parser.add_argument('-d', '--debug', action='store_true', default=False, help='Debug wrapper')
	parser.add_argument('-g', '--ignore_non_matches', action='store_true', default=False, help='Ignores xpath entries if regular expressions are used that do not match the result')
	parser.add_argument('-G', '--ignore_path', nargs=1, type=str, help='XPath to elements in the xml tree that should be ignored')
	
	ns = parser.parse_args(argv)
	cmdline_args = ns.__dict__


	cmdline_args['resource'][0], regex_r = splitXpathAndRegex(cmdline_args['resource'][0])
	cmdline_args['broader'][0], regex_b = splitXpathAndRegex(cmdline_args['broader'][0])
	cmdline_args['narrower'][0], regex_n = splitXpathAndRegex(cmdline_args['narrower'][0])
	cmdline_args['label'][0], regex_l = splitXpathAndRegex(cmdline_args['label'][0])

	# Maps names of required flags in the parser to names in output.
	arguments = {
		'input': 'inputFile',
		'xpath': 'xpath',
		'resource': 'xpath_r',
		'broader': 'xpath_b',
		'narrower': 'xpath_n',
		'label': 'xpath_l',
	}

	output = dict(
		(name, None) for name in (arguments.values() + 
		['regex_r', 'regex_b', 'regex_n', 'regex_l', 'rdftypes', 'namespace', 'ignore_path']))

	for a, b in arguments.items():
		# TODO: We can probably get rid of the len, based on the nargs
		# parameter above.
		if not cmdline_args[a] or len(cmdline_args[a]) != 1:
			# TODO: Investigate if it's possible to mark the
			# arguments as required above.
			print 'Required parameter missing: %s' % a
			sys.exit(2)
		else:
			output[b] = cmdline_args[a][0]


	if cmdline_args['addtype'] and len(cmdline_args['addtype']) >= 1:
		output['rdftypes'] = cmdline_args['addtype']
	if cmdline_args['namespace'] and len(cmdline_args['namespace']) == 1:
		output['namespace'] = cmdline_args['namespace'][0]
	if cmdline_args['ignore_path'] and len(cmdline_args['ignore_path']) == 1:
		output['ignore_path'] = cmdline_args['ignore_path'][0]
	if regex_r:
		output['regex_r'] = regex_r
	if regex_b:
		output['regex_b'] = regex_b
	if regex_n:
		output['regex_n'] = regex_n
	if regex_l:
		output['regex_l'] = regex_l
	if cmdline_args['ignore_non_matches']:
		ignore_non_matches = cmdline_args['ignore_non_matches']
	if cmdline_args['debug']:
		debug = cmdline_args['debug']


	return output


def mapXpaths(xpath, elements):
	"""
	Lazily evaluate an XPath query against every element in turn.

	Arguments:
		xpath: An XPath query string.
		elements: An iterable of ElementTree nodes on which the query
			is executed.
	Yields:
		The XPath evaluation result for each input element.
	"""
	for node in elements:
		yield node.xpath(xpath)

def mapXpathsFirst(xpath, elements):
	"""
	Evaluate an XPath query against every element and yield only the
	first hit of each non-empty result.

	Arguments:
		xpath: An XPath query string.
		elements: An iterable of ElementTree nodes on which the query
			is executed.
	Yields:
		The first node of every query result that contains at least
		one entry; empty results are skipped.
	"""
	for node in elements:
		matches = node.xpath(xpath)
		if matches is not None and len(matches) >= 1:
			yield matches[0]

def extractRegexGroupIndex(regex):
	"""
	Split a 'pattern#groupindex' regex argument into its two parts.

	Arguments:
		regex: The combined argument string, or None.
	Returns:
		A (pattern, index) tuple where index is None when no '#'
		separator is present, or None when the input itself is None.
	"""
	# BUG FIX: the None guard must run before regex.split(); previously a
	# None argument raised AttributeError and the guard was dead code.
	if regex is None:
		return None
	ilist = regex.split('#', 1)
	if len(ilist) == 1:
		return ilist[0], None
	return ilist[0], ilist[1]

def regexGroup(regex, content):
	"""Returns a list of strings that match the regex on the content"""
	regex, index = extractRegexGroupIndex(regex)
	if index is None:
		print 'Please provide a group index for the regex evaluation.'
		sys.exit(2)
	m = re.match(regex, content)
	if m is None:
		if not ignore_non_matches:
			print 'Could not find match for regex %s in %s' % (regex,content)
			sys.exit(2)
		else:
			if strict:
				print 'Warning: Content %s did not match regex %s' % (content,regex)
			return None
	g = m.group(int(index))
	return g


#########################################################
# main():
#########################################################
def main(argv):
	"""
	Entry point of the skosify script.

	Parses the command line arguments, loads the input document with
	lxml, evaluates the configured XPath queries (optionally
	post-processed by '#'-suffixed regexes) for resources, labels,
	broader- and narrower relations, and prints the resulting SKOS
	thesaurus in RDF/N3 format to stdout.

	Arguments:
		argv: A list of command line argument strings (excluding the
			program name).
	"""
	# parse command line arguments
	arguments = parseCmdlineArgs(argv)
	
	if debug:
		print 'Debug enabled '

	# is the xpath argument requesting the value of an attribute?
	xpath_attribute_value_pattern = re.compile('.*@(\w*)$')
	
	# values retrieved from command line arguments
	inputFile = arguments['inputFile']
	
	xpath = arguments['xpath']
	xpath_r = arguments['xpath_r']
	xpath_b = arguments['xpath_b']
	xpath_n = arguments['xpath_n']
	xpath_l = arguments['xpath_l']
	
	regex_r = arguments['regex_r']
	regex_b = arguments['regex_b']
	regex_n = arguments['regex_n']
	regex_l = arguments['regex_l']

	rdftypes = arguments['rdftypes']
	namespace = arguments['namespace']

	ignore_path = arguments['ignore_path']

	prefix = ''

	# emit the standard namespace prefix declarations for the N3 output
	print '@prefix rdf: <' + rdf_url + '>.'
	print '@prefix rdfs: <' + rdfs_url + '>.'
	print '@prefix skos: <' + skos_url + '>.'

	# extract prefix and url from namespace if available
	if namespace:
		prefix, url = namespace.split(':',1)
		# a plain URL namespace (http/https) is used verbatim as the
		# prefix; any other 'alias:url' pair is declared as an @prefix
		if prefix in ['http', 'https']:
			prefix = namespace
		elif prefix:
			prefix += ':'
			print '@prefix ' + prefix + ' <' + url + '>.'

	# parse the input file
	tree = parse(inputFile)

	# delete all elements that should be ignored
	if ignore_path is not None and len(ignore_path) != 0:
		if debug:
			print 'Ignoring path: ' + ignore_path
		# ignore_path has the form 'xpath#depth': climb `depth` parents
		# up from each match, then detach the reached node
		ignore_xpath, depth = ignore_path.split('#',1)
		for n in tree.xpath(ignore_xpath):
			for d in range(int(depth)):
				n = n.getparent()
			n.getparent().remove(n)
		
	# ... and extract all the nodes on which we execute relative queries
	base = tree.xpath(xpath)

	# query for the resources
	# NOTE(review): mapXpathsFirst returns a generator; each of these
	# element streams is consumed exactly once by the comprehensions below
	resource_elements = mapXpathsFirst(xpath_r, base)

	if xpath_attribute_value_pattern.match(xpath_r):
		# The element has been extracted from an element attribute e.g.
		# <element text="bla" />.  The element is already a string.
		if regex_r is not None: 
			# regexGroup runs twice per item: once as the truth test,
			# once to build the prefixed value; None results (ignored
			# non-matches) are filtered out afterwards
			resources = [regexGroup(regex_r, r) and \
			prefix + regexGroup(regex_r,r) for r in resource_elements]
			resources = filter(None, resources)
		else:
			resources = [r and prefix + r for r in resource_elements]

	else:
		# The element holds a text content, e.g. <element> bla
		# </element>,  The text must be extracted from the ElementNode.
		if regex_r is not None: 
			resources = [regexGroup(regex_r, extractText(r)) and \
			prefix + regexGroup(regex_r,extractText(r)) for r in resource_elements]
			resources = filter(None, resources)
		else:
			resources = [extractText(r) and prefix + extractText(r) for r in resource_elements]

	# query for the skos preferred labels
	label_elements = mapXpathsFirst(xpath_l, base)

	if xpath_attribute_value_pattern.match(xpath_l):
		# The element has been extracted from an element attribute e.g.
		# <element text="bla" />.
		if regex_l is not None: 
			labels = [regexGroup(regex_l,l) and \
			regexGroup(regex_l,l) for l in label_elements]
			labels = filter(None, labels)
		else:
			labels = label_elements
	else:
		# The element holds a text content. e.g. <element> bla
		# </element>.  The text must be extracted from the ElementNode.
		if regex_l is not None: 
			labels = [regexGroup(regex_l,extractText(l)) and \
			regexGroup(regex_l,extractText(l)) for l in label_elements]
			labels = filter(None, labels)
		else:
			labels = [l.text for l in label_elements]

	# query for the superclasses. Note that there might be multiple 
	# skos:broader relations for one resource. Therefore, we are constructing
	# lists of lists of superclasses.
	superclass_elements = mapXpaths(xpath_b, base)
	superclasses = []

	# NOTE(review): when regexes filter out non-matching entries, the
	# resources/labels/superclasses/subclasses lists can end up with
	# different lengths; Python 2 map() below pads the shorter lists
	# with None — confirm this is the intended pairing behavior.

	# check whether the element holds a text content, e.g. <element> bla </element>,
	# or whether it has been extracted from an element attribute
	# e.g. <element text="bla" />.
	# We need to differentiate because in the first case, the text must be extracted
	# from the ElementNode, whereas in the second case, the Element is already a string
	if xpath_attribute_value_pattern.match(xpath_b):
		# for each list in the list of superclasses, 
		# we append a prefix to the node name
		for element in superclass_elements:
			if regex_b is not None:
				superclass_list = [regexGroup(regex_b,s) and \
				prefix + regexGroup(regex_b,s) for s in element]
				superclass_list = filter(None, superclass_list)
			else:
				superclass_list = [prefix + s for s in element]
			superclasses.append(superclass_list)

	else:
		# for each list in the list of superclasses, 
		# we extract and append a prefix to the node name
		for element in superclass_elements:
			if regex_b is not None:
				superclass_list = [regexGroup(regex_b,extractText(s)) and \
				prefix + regexGroup(regex_b,extractText(s)) for s in element]
				superclass_list = filter(None, superclass_list)
			else:
				superclass_list = [extractText(s) and prefix + extractText(s) for s in element]
			superclasses.append(superclass_list)

	# query for the subclasses. Note that there might be multiple 
	# skos:narrower relations for one resource. Therefore, we are constructing
	# lists of lists of subclasses.
	subclass_elements = mapXpaths(xpath_n, base)
	subclasses = []
	
	# check whether the element holds a text content, e.g. <element> bla </element>,
	# or whether it has been extracted from an element attribute
	# e.g. <element text="bla" />.
	# We need to differentiate because in the first case, the text must be extracted
	# from the ElementNode, whereas in the second case, the Element is already a string
	if xpath_attribute_value_pattern.match(xpath_n):
		# for each list in the list of subclasses, 
		# we append a prefix to the node name
		for element in subclass_elements:
			if regex_n is not None:
				subclass_list = [regexGroup(regex_n,s) and \
				prefix + regexGroup(regex_n,s) for s in element]
				subclass_list = filter(None, subclass_list)
			else:
				subclass_list = [prefix + s for s in element]
			subclasses.append(subclass_list)

	else:
		# for each list in the list of subclasses, 
		# we extract and append a prefix to the node name
		for element in subclass_elements:
			if regex_n is not None:
				subclass_list = [regexGroup(regex_n,extractText(s)) and \
				prefix + regexGroup(regex_n,extractText(s)) for s in element]
				subclass_list = filter(None, subclass_list)
			else:
				subclass_list = [extractText(s) and prefix + extractText(s) for s in element]
			subclasses.append(subclass_list)
	
	# rdftypes is optional. If no rdftype is given, 
	# we construct a list of None types to give it to the skosify method
	if rdftypes is None or len(rdftypes) == 0:
		rdftypes = [None]*len(resources)
	else:
		rdftypes = [rdftypes]*len(resources)

	# one SKOS snippet per resource, printed to stdout
	skoses = map(skosify, resources, labels, superclasses, subclasses, rdftypes)
	for s in skoses:
		print s

	sys.exit(0)
#end def main()

# Script entry point: forward the command line arguments without the
# program name.
if __name__ == "__main__":
	main(sys.argv[1:])
