# -*- coding: utf-8 -*-
#    Copyright 2005 Spike^ekipS <spikeekips@gmail.com>
#
#       This program is free software; you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation; either version 2 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program; if not, write to the Free Software
#    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

import sys
import PyLucene
import nokcene.query
import nokcene.analysis
from util import datetime__

def search_field_to_query (search_fields=(), search_options=None) :
	"""Build a PyLucene BooleanQuery from a nested field specification.

	search_fields : tuple whose items are
		- dicts  : single field specs, translated by __field_to_query
		- tuples : nested groups, combined recursively
		- an optional leading string naming the boolean condition
		  (e.g. "AND"/"OR") that joins the remaining items of this group
	search_options : optional dict; its "operator" entry selects the
		default boolean condition (defaults to "OR").

	Returns a PyLucene.BooleanQuery combining every sub-query.
	"""
	if search_options is None :
		search_options = {}

	default_query_operator = search_options.get("operator", "OR").upper()
	# BUGFIX: the fallback must be the mapped "OR" *clause object*, not
	# the bare string "OR" -- BooleanQuery.add() expects a clause, so an
	# unknown operator previously produced a string default_clause.
	default_clause = nokcene.query.boolean_clauses_map.get(
		default_query_operator,
		nokcene.query.boolean_clauses_map.get("OR"),
	)

	query = PyLucene.BooleanQuery()
	# lift the clause-count limit so large groups do not raise
	query.setMaxClauseCount(sys.maxint)

	# an optional leading string overrides the joining condition for
	# this group of fields
	condition = default_query_operator
	if len(search_fields) > 0 and type(search_fields[0]) in (str, unicode, ) :
		condition = search_fields[0]

	for field in search_fields :
		if type(field) is tuple :
			# nested group: combine recursively with the same options
			subquery = search_field_to_query(field, search_options)
		elif type(field) is dict :
			subquery = __field_to_query(field)
		else :
			# strings (the condition marker) and unknown types are skipped
			continue

		query.add(
			subquery,
			nokcene.query.boolean_clauses_map.get(
				condition, default_clause
			)
		)

	return query


def __field_to_query (field) :
	"""Translate one field-spec dict into a PyLucene query object.

	field keys:
		"id"        : index/field name (required)
		"value"     : raw search value (required)
		"type"      : "keyword", "multikeyword", "path", "date", or
		              anything else for free-text parsing (default "")
		"condition" : read but unused below -- presumably for callers;
		              TODO confirm
		"usage"     : for "date" fields: "range:min", "range:max" or
		              "range:min:max" (default "")
		"analyzer"  : analyzer id for free-text parsing (default "cjk")

	Returns a PyLucene query (BooleanQuery, TermQuery or RangeQuery).
	Raises PyLucene.JavaError if the free-text query cannot be parsed.
	"""
	index = field["id"]
	value = field["value"]
	__type =  field.get("type", "")
	condition = field.get("condition", "OR")
	usage = field.get("usage", "")

	# fall back to the "cjk" analyzer when the requested one is missing
	# or unknown
	analyzer = field.get("analyzer", "cjk")
	if (not analyzer or
		analyzer.lower() not in nokcene.analysis.analyzers_map.keys()) :
		analyzer = "cjk"

	analyzer = analyzer.lower()

	if __type.lower() in ("keyword", "multikeyword") :
		# keyword fields: OR together one TermQuery per "#"-separated value
		subquery = PyLucene.BooleanQuery()

		# FIXME use tokenizer... this sucks...
		if "#" in value:
			values = value.split("#")
		else:
			values = [value]

		# FIXME use tokenizer... this sucks...
		for each in values:
			# ":" is replaced by "_" -- presumably mirrors how keywords
			# were normalized at indexing time; verify against indexer
			each = each.replace(":", "_")
			subquery.add(
				PyLucene.TermQuery(PyLucene.Term(index, each)),
				nokcene.query.boolean_clauses_map.get("OR"))

	elif __type.lower() == "path":
		# path fields: OR together one TermQuery per path, values split
		# on "#" or on whitespace
		subquery = PyLucene.BooleanQuery()

		# FIXME use tokenizer... this sucks...
		if "#" in value:
			values = value.split("#")
		else:
			values = value.split()

		# FIXME use tokenizer... this sucks...
		for each in values:

			# a trailing "*" wildcard is simply dropped (no prefix query)
			if each.endswith("*") :
				# :FIXME:
				each  = each[:-1]

			# Remove trailing "/" since we remove it at
			# indexation time.
			if each.endswith("/") :
				each  = each[:-1]

			term = PyLucene.Term(index, each)
			ssquery = PyLucene.TermQuery(term)

			subquery.add(
				ssquery,
				nokcene.query.boolean_clauses_map.get("OR")
				)

	elif __type.lower() == "date":
		# date fields: build an open or closed RangeQuery depending on
		# the "usage" hint
		subquery = None
		start_date = None
		end_date = None
		#start_date = PyLucene.Term(index, datetime__.now().strftime("%F %T"))
		#end_date = PyLucene.Term(index, datetime__.now().strftime("%F %T"))

		if usage and usage != "range:min:max":
			# one-sided range: value is either the lower or upper bound
			if usage == "range:min" :
				start_date = PyLucene.Term(index, value)
			elif usage == "range:max":
				end_date = PyLucene.Term(index, value)
			else:
				# unrecognized usage -> subquery stays None; NOTE(review):
				# callers may not expect a None return -- confirm
				pass

			if start_date is not None or end_date is not None:
				# exclusive bounds for one-sided ranges
				subquery = PyLucene.RangeQuery( start_date, end_date, False)

		else:
			# "range:min:max" or no usage: value may carry both bounds
			# as "start#end"
			values = value.split("#")
			if len(values) == 2:
				start_date = values[0]
				end_date = values[1]

				# inclusive bounds for the two-sided range
				subquery = PyLucene.RangeQuery(
					PyLucene.Term(index, start_date),
					PyLucene.Term(index, end_date), True)
			else:
				# no range separator: exact-match on the single value
				term = PyLucene.Term(index,  value)
				subquery = PyLucene.TermQuery(term)
	else:
		# free text: run the value through the Lucene query parser with
		# the selected analyzer
		this_analyzer = nokcene.analysis.getAnalyzerById(analyzer)
		try:
			__parser = PyLucene.QueryParser(index, this_analyzer)
			subquery = __parser.parse(value)
		except PyLucene.JavaError:
			raise

	return subquery

"""
Description
-----------


ChangeLog
---------


Usage
-----


"""

__author__ =  "Spike^ekipS <spikeekips@gmail.com>"
__version__=  "0.1"
__nonsense__ = ""

__file__ = "query_new.py"


