#################################################################################
#
# Copyright (c) 2006 Michigan State University
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
#################################################################################

#################################################################################
#
#	Author:
#		Nathan Collins <npcollins@gmail.com>
#
#################################################################################

import re
import os
import types

class termObj:
	"""
	Term object is a simple phrase or word.

	Examples:
	test
	"hello world"
	"""
	def __init__(self):
		# the raw term text; empty until set() is called
		self.value = ""

	def set(self, value):
		"""
		Set the value for the object.

		Raises ValueError if value is not a string.
		"""

		# verify value is a string
		# (was isinstance(value, types.StringTypes), which no longer
		# exists; string exceptions are likewise no longer legal, so a
		# real exception type is raised instead)
		if isinstance(value, str):
			self.value = value

		else:
			# not a string
			raise ValueError("Invalid term: %s" % value)

	def isPhrase(self):
		"""
		Whether or not this has multiple terms (i.e. contains a space).
		"""
		return " " in self.value

	def toLucene(self):
		"""
		Convert contents to a lucene query string fragment
		(with a trailing space).
		"""

		# phrases are quoted; plain terms are emitted as-is
		if self.isPhrase():
			return '"%s" ' % self.value

		return '%s ' % self.value

	def toRegularExpression(self):
		"""
		Convert to a string used to match word in a regular expression
		"""
		return self.value


class wildtermObj(termObj):
	"""
	Term object is a phrase or word, but may contain wildcards * or ?

	te?t
	test*
	te*t?
	"""

	def toRegularExpression(self):
		"""
		Convert to a string used to match word in a regular expression:
		each lucene wildcard (? or *) becomes \\w followed by the same
		character as a regexp quantifier.
		"""
		# raw strings avoid the invalid-escape warnings the old
		# "(\?|\*)" pattern produced; behavior is unchanged
		return re.sub(r"(\?|\*)", r"\w\1", self.value)


class booleanObj:
	"""
	Boolean object can be include(+) exclude(-) and(AND,&&) or(OR,||) not(!,NOT)
	"""

	def __init__(self):
		# the operator token; empty until set() is called
		self.value = ""

	def set(self, value):
		"""
		Set the boolean type.

		Raises ValueError for anything outside the accepted operator set.
		"""
		if value in ("+","-","&&","||","!","AND","OR","NOT"):
			self.value = value

		else:
			# invalid boolean (string exceptions are no longer legal,
			# so raise a real exception type)
			raise ValueError("Invalid boolean: %s" % value)

	def toLucene(self):
		"""
		Convert to a lucene query fragment.  Prefix operators (+/-)
		bind directly to the following term, so no trailing space.
		"""
		if self.value in ("+","-"):
			lucene = self.value
		else:
			lucene = "%s " % self.value

		return lucene


class fieldObj:
	"""
	Field object defines fieldname
	"""

	def __init__(self):
		# name of the field being searched
		self.value = ""

	def set(self, value):
		# remember the field name
		self.value = value

	def toLucene(self):
		# lucene field syntax is the field name followed by a colon
		return "%s:" % self.value


class baseObj:
	"""
	Base object. One class to hold them all...
	"""
	def __init__(self):
		# ordered list of parsed search objects
		self.values = []

	def add(self, value):
		# append a parsed search object to the container
		self.values.append(value)

	def lastType(self):
		# type string (via getObjType) of the most recently added object
		return getObjType(self.values[-1])

	def getLast(self):
		# the most recently added object itself
		return self.values[-1]

	def toLucene(self):
		# rebuild the lucene query string from the stored objects
		lucene = ""

		for obj in self.values:
			# modifiers attach directly to the preceding term, so drop
			# the whitespace that term left behind before appending
			if getObjType(obj) == "modify":
				lucene = lucene.strip()

			lucene += obj.toLucene()

		return lucene.strip()


class groupObj(baseObj):
	"""
	Grouping object
	"""
	def toLucene(self):
		# concatenate the children's lucene fragments and wrap the
		# result in parentheses (trailing space included)
		inner = "".join(child.toLucene() for child in self.values)
		return "(%s) " % inner


class rangeObj:
	"""
	Ranged object
	inclusive -> field:[20020101 TO 20030101]
	exclusive -> field:{Aida TO Carmen}
	"""
	def __init__(self):
		# range endpoints; exclusive stays -1 (sentinel) until set()
		self.start = ""
		self.end = ""
		self.exclusive = -1

	def set(self, start, end, exclusive = 0):
		# record the endpoints and whether the range excludes them
		self.start = start
		self.end = end
		self.exclusive = exclusive

	def toLucene(self):
		# a range that was never set() produces nothing
		if self.exclusive < 0:
			return ""

		# exclusive ranges use braces, inclusive ranges use brackets
		if self.exclusive:
			return "{%s TO %s} " % (self.start, self.end)
		return "[%s TO %s] " % (self.start, self.end)


class modifyObj:
	"""
	Modify object. For boosts, fuzzy, and proximity modifiers
	"""
	def __init__(self):
		# modifier symbol (^ or ~) and its numeric argument
		self.value = ""
		self.modify = ""

	def set(self, value, modify):
		"""
		Must be of type:
		^5 (boost)
		~ (fuzzy, default 0.5, only usable on single term)
		~0.3 (fuzzy)
		~8 (proximity, only usable on phrases)
		"""
		self.value = value
		self.modify = modify

	def toLucene(self):
		# the symbol immediately followed by its argument, then a space
		return "%s%s " % (self.value, self.modify)


def getObjType(searchobj):
	"""
	Given an instance of a search parse class, return a string with its
	type. Empty string if no match.
	"""
	# Subclasses must be tested before their base classes: wildtermObj
	# is a termObj and groupObj is a baseObj, so with the old ordering
	# the "wildterm" and "group" branches were unreachable.
	if isinstance(searchobj, wildtermObj):
		return "wildterm"

	if isinstance(searchobj, termObj):
		return "term"

	if isinstance(searchobj, booleanObj):
		return "boolean"

	if isinstance(searchobj, fieldObj):
		return "field"

	if isinstance(searchobj, groupObj):
		return "group"

	if isinstance(searchobj, baseObj):
		return "base"

	if isinstance(searchobj, rangeObj):
		return "range"

	if isinstance(searchobj, modifyObj):
		return "modify"

	return ""


def mergeDictList(one, two):
	"""
	Take two dicts of lists, and combine them into one.

	The default search fields are always present in the result.
	Neither input dict (nor its lists) is modified.
	"""

	# start from the default (empty) field lists
	terms = {}
	terms['keywords'] = []
	terms['mTitle'] = []
	terms['mCreator'] = []
	terms['mDescription'] = []
	terms['mSubject'] = []

	# copy the lists: the old code aliased one[k] and then extended it
	# in place below, silently mutating the caller's dict
	for k in one:
		terms[k] = list(one[k])

	for k in two:
		# dict.has_key() no longer exists; use the 'in' operator
		if k in terms:
			terms[k] = terms[k] + two[k]

		else:
			terms[k] = list(two[k])

	return terms


def removeUnwantedTerms(searchstr):
	"""
	Given a lucene search string, remove all unwanted terms (NOT, !, -)
	and return the modified search string.
	"""
	# patterns are raw strings now; the old non-raw literals relied on
	# invalid escape sequences (\s, \w, \() that modern Python warns on

	# field searches (not perfect, a recursive solution would be better)
	searchstr = re.sub(r'\s*(-|NOT |! |!)\w+:(".*?[^\\]"\s*|\w[\w?*]*\s*|\(.+?\))', ' ', searchstr)

	# terms/wildterms
	searchstr = re.sub(r'\s*(-|NOT |! |!)\w[\w?*]*\s*', ' ', searchstr)

	# phrases
	searchstr = re.sub(r'\s*(-|NOT |! |!)".*?[^\\]"\s*', ' ', searchstr)

	return searchstr


def getLuceneTerms(searchstr):
	"""
	Given a valid lucene search string, parse a dict containing lists
	for each field, defaulting terms into 'keywords'
	"""

	# remember the original length of the search
	searchlen = len(searchstr)

	# the return dict
	terms = {}
	terms['keywords'] = []
	terms['mTitle'] = []
	terms['mCreator'] = []
	terms['mDescription'] = []
	terms['mSubject'] = []

	# we are not concerned with exclusions being in the return dict as those words should not occur anyways - will rethink this later

	while len(searchstr) > 0:

		# trim useless syntax
		if searchstr[0] in (' ','+','-','!'):
			searchstr = searchstr[1:]

		elif searchstr[0:2] in ('&&','||','OR'):
			searchstr = searchstr[2:]

		elif searchstr[0:3] in ('AND','NOT'):
			searchstr = searchstr[3:]

		# if next is a field identifier
		elif re.match("\w+:",searchstr):
			# parse field
			bounds = re.match("\w+:",searchstr).span()
			field = searchstr[bounds[0]:bounds[1]-1]

			searchstr = searchstr[bounds[1]:]

			# trim useless syntax
			trimming = 1
			while trimming:
				if searchstr[0] in (' ','+','-','!'):
					searchstr = searchstr[1:]
					continue

				elif searchstr[0:2] in ('&&','||','OR'):
					searchstr = searchstr[2:]
					continue

				elif searchstr[0:3] in ('AND','NOT'):
					searchstr = searchstr[3:]
					continue

				trimming = 0

			# if next is term, parse and add to field
			if re.match("^\w[\w?*']*",searchstr):
				# match the word
				bounds = re.match("^[\w?*']+",searchstr).span()
	
				# pull the term
				phrase = searchstr[bounds[0]:bounds[1]]
	
				# check for wildcards in phrase
				if "?" in phrase or "*" in phrase:
					phrase = re.sub("(\?|\*)", "\\w\\1", phrase)

				if terms.has_key(field):
					terms[field] += [phrase]
				else:
					terms[field] = [phrase]

				searchstr = searchstr[bounds[1]:]

			# if next is phase, parse and add to field
			elif searchstr[0] == '"':
				# first non-escaped " will be the closing "
				match = re.match('".*?[^\\\\]"', searchstr)

				# if we matched the phrase
				if match is not None:
					# get the bounds
					bounds = match.span()
	
					# pull the quoted phrase
					phrase = searchstr[bounds[0]:bounds[1]]
	
					# remove quotes
					phrase = phrase[1:-1]
	
					# check for wildcards in phrase
					if "?" in phrase or "*" in phrase:
						phrase = re.sub("(\?|\*)", "\\w\\1", phrase)

					if terms.has_key(field):
						terms[field] += [phrase]
					else:
						terms[field] = [phrase]

					searchstr = searchstr[bounds[1]:]

				else:
					break

			# if next is group, parse and merge
			elif searchstr[0] == '(':
				# remove opening (
				searchstr = searchstr[1:]

				# parse middle of group
				mid, parsed = getLuceneTerms(searchstr)

				# trim parsed off string
				searchstr = searchstr[parsed:]

				# merge middle results with dict
				if terms.has_key(field):
					terms[field] = terms[field] + mid['keywords']
				else:
					terms[field] = mid['keywords']

			# otherwise
			else:
				break

		# if opening ", find closing ". check for wildcards
		elif searchstr[0] == '"':
			# first non-escaped " will be the closing "
			match = re.match('".*?[^\\\\]"', searchstr)

			# if we matched the phrase
			if match is not None:
				# get the bounds
				bounds = match.span()

				# pull the quoted phrase
				phrase = searchstr[bounds[0]:bounds[1]]

				# remove quotes
				phrase = phrase[1:-1]

				# check for wildcards in phrase
				if "?" in phrase or "*" in phrase:
					phrase = re.sub("(\?|\*)", "\\w\\1", phrase)

				terms['keywords'] += [phrase]

				searchstr = searchstr[bounds[1]:]

			else:
				break

		# if opening (, find matching ). parse contents
		elif searchstr[0] == '(':
			# trim off group open
			searchstr = searchstr[1:]

			subterms, parsed = getLuceneTerms(searchstr)

			terms = mergeDictList(terms,subterms)

			searchstr = searchstr[parsed:]

		# if closing, then return to (hopefully) an opening
		elif searchstr[0] == ')':
			searchstr = searchstr[1:]
			break

		# if opening range { or [, find matching ] or }. range objects don't bold well
		elif searchstr[0] in ('[','{'):
			# pull the values from the range
			match = re.match(r'[\[{]\s*(\w+)\s+TO\s+(\w+)\s*[\]}]', searchstr)

			if match is None:
				break

			bounds = match.span()

			searchstr = searchstr[bounds[1]:]

		# fuzzy, proximity, and boost modifiers. don't mess with fuzzy
		elif searchstr[0] in ('^','~'):
			# remove the parameter
			match = re.match('[~^]\d*.\d*',searchstr)

			if match is not None:
				# trim off whole modifier
				bounds = match.span()
				searchstr = searchstr[bounds[1]:]

			else:
				# just the mod symbol
				searchstr = searchstr[1:]

			# boost and fuzzy are left alone

			# proximity, break up the phrase into terms
			# todo

		# if next is a word/number (cannot begin with wildcards)
		elif re.match("^\w[\w?*']*",searchstr):
			# match the word
			match = re.match("^[\w?*']+",searchstr)

			# find the bounds
			bounds = match.span()

			# pull the term
			phrase = searchstr[bounds[0]:bounds[1]]

			# check for wildcards in phrase
			if "?" in phrase or "*" in phrase:
				phrase = re.sub("(\?|\*)", "\\w\\1", phrase)

			terms['keywords'] += [phrase]

			searchstr = searchstr[bounds[1]:]

		# otherwise, unknown or invalid
		else:
			# not crucial, just break out and return what we have already
			break

	return terms, searchlen - len(searchstr)


def parseLuceneSearch(searchstr):
	"""
	Take a lucene search string and return a search object.

	Returns (base, parsed): the baseObj holding the parse tree and the
	number of characters consumed from searchstr (recursion uses it to
	skip nested groups).  Raises ValueError on malformed input (string
	exceptions, which the old code raised, are no longer legal).
	"""

	# remember the original length of the search
	searchlen = len(searchstr)

	# base obj
	base = baseObj()

	# process search string
	while len(searchstr) > 0:
		# if leading space, trim it
		if searchstr[0] == ' ':
			searchstr = searchstr[1:]

		# if opening ", find closing ". check for wildcards, then create (wild)termObj
		elif searchstr[0] == '"':
			# first non-escaped " will be the closing "
			match = re.match(r'".*?[^\\]"', searchstr)

			# if we matched the phrase
			if match is not None:
				# get the bounds
				bounds = match.span()

				# pull the quoted phrase, then strip the quotes
				phrase = searchstr[bounds[0]:bounds[1]]
				phrase = phrase[1:-1]

				# wildcards in the phrase decide which term class to use
				if "?" in phrase or "*" in phrase:
					term = wildtermObj()
				else:
					term = termObj()

				# set the value of the term and add to search obj list
				term.set(phrase)
				base.add(term)

			else:
				# we have an opening " without a closing ". search is invalid
				raise ValueError('Opening quote(") without a closing quote.')

			# trim parsed data off search string
			searchstr = searchstr[bounds[1]:]

		# if opening (, find matching ). parse contents and put them in a groupObj
		elif searchstr[0] == '(':
			# create grouping object
			group = groupObj()

			# trim opening paren
			searchstr = searchstr[1:]

			# parse insides
			contents, parsed = parseLuceneSearch(searchstr)

			# add results of parse to group
			group.add(contents)

			# add group to base object
			base.add(group)

			# trim parsed data off search string
			searchstr = searchstr[parsed:]

		# if closing, then return to (hopefully) an opening
		elif searchstr[0] == ')':
			searchstr = searchstr[1:]
			break

		# if opening range { or [, find matching ] or }. parse components into rangeObj
		elif searchstr[0] in ('[','{'):
			# the opening bracket defines inclusive vs exclusive
			exclusive = 0
			if searchstr[0] == '{':
				exclusive = 1

			# pull the values from the range
			match = re.match(r'[\[{]\s*(\w+)\s+TO\s+(\w+)\s*[\]}]', searchstr)

			if match is None:
				# invalid range match
				raise ValueError("Invalid range syntax: %s" % searchstr[:30])

			# get the data from the match
			groups = match.groups()
			bounds = match.span()

			# pull the two limits
			start = groups[0]
			end = groups[1]

			# the range container
			robj = rangeObj()
			robj.set(start, end, exclusive)

			# add to base
			base.add(robj)

			# trim search string of the parsed data
			searchstr = searchstr[bounds[1]:]

		# if boolean operator
		elif searchstr[0] in ('+','-','!') or searchstr[0:3] in ('AND','NOT') or searchstr[0:2] in ('OR','||','&&'):
			# the boolean object ('boolop' rather than shadowing the builtin)
			boolop = booleanObj()

			# check length to pull
			if searchstr[0] in ('+','-','!'):
				boollen = 1
			elif searchstr[0:2] in ('OR','||','&&'):
				boollen = 2
			else: # AND, NOT
				boollen = 3

			# set bool value
			boolop.set(searchstr[0:boollen])

			# add to base
			base.add(boolop)

			# trim parsed data off search string
			searchstr = searchstr[boollen:]

		# fuzzy, proximity, and boost modifiers
		elif searchstr[0] in ('^','~'):
			# ^5 (boost)
			# ~ (fuzzy, default 0.5, only usable on single term)
			# ~0.3 (fuzzy)
			# ~8 (proximity, only usable on phrases)

			# mod type
			mod = searchstr[0]

			# modify object
			mobj = modifyObj()

			# pull a value for the modify, if present
			match = re.match(r'[~^]\d*(\.\d+)?', searchstr)

			# bounds for the value
			bounds = match.span()

			# pick the value out
			val = searchstr[1:bounds[1]]

			# a modifier needs something to modify; the old code let
			# lastType() raise a bare IndexError on an empty base
			if len(base.values) == 0:
				raise ValueError("Cannot apply %s modifier: %s" % (mod, searchstr[:20]))

			# if tilde modifier
			if mod == '~':
				# decide fuzzy or proximity or invalid
				# ("wildterm" accepted too: once getObjType distinguishes
				# wildcard terms they must still take modifiers)
				if base.lastType() in ("term", "wildterm"):
					# if this is a phrase
					if base.getLast().isPhrase():
						# then it is a proximity mod; verify an integer value
						if len(val) < 1 or '.' in val:
							# invalid value
							raise ValueError("Invalid value for ~ modifier: %s" % searchstr[:20])
						else:
							# proximity mod
							mobj.set("~", val)

					else:
						# otherwise a fuzzy mod
						mobj.set("~", val)

				else:
					# not a term, can't fuzzy/proximity this
					raise ValueError("Cannot apply ~ modifier: %s" % searchstr[:20])

			# else carrot modifier
			else:
				# verify last type is a non-phrase term
				if base.lastType() in ("term", "wildterm") and not base.getLast().isPhrase():
					# if the value is not empty
					if val != "":
						mobj.set("^", val)

					else:
						raise ValueError("Modifier ^ has no value: %s" % searchstr[:20])

				else:
					# invalid boost
					raise ValueError("Cannot apply ^ modifier: %s" % searchstr[:20])

			# add modifier to base
			base.add(mobj)

			# trim parsed data off search string
			searchstr = searchstr[bounds[1]:]

		# if next is a field identifier
		elif re.match(r"\w+:", searchstr):
			# match the field
			match = re.match(r"\w+:", searchstr)

			# find the bounds
			bounds = match.span()

			# pull the field (without the trailing colon)
			field = searchstr[bounds[0]:bounds[1] - 1]

			# field object: set the value and add to search obj list
			fobj = fieldObj()
			fobj.set(field)
			base.add(fobj)

			# trim parsed data off search string
			searchstr = searchstr[bounds[1]:]

		# if next is a word/number (cannot begin with wildcards)
		elif re.match(r"^\w[\w?*']*", searchstr):
			# match the word
			match = re.match(r"^[\w?*']+", searchstr)

			# find the bounds
			bounds = match.span()

			# pull the term
			phrase = searchstr[bounds[0]:bounds[1]]

			# wildcards in the term decide which term class to use
			if "?" in phrase or "*" in phrase:
				term = wildtermObj()
			else:
				term = termObj()

			# set the value of the term and add to search obj list
			term.set(phrase)
			base.add(term)

			# trim parsed data off search string
			searchstr = searchstr[bounds[1]:]

		# otherwise, unknown or invalid
		else:
			raise ValueError("Parse error starting at: %s" % searchstr[:30])

	# return base obj, length of string parsed
	return base, searchlen - len(searchstr)


# eof
