# -*- coding: utf-8 -*-
"""
 Copyright 2008 Spike^ekipS <spikeekips@gmail.com>

	This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2 of the License, or
 (at your option) any later version.

 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
"""

try :
	import lucene
	env = lucene.initVM(lucene.CLASSPATH)
except ImportError :
	pass

if not globals().has_key("lucene") :
	try :
		import PyLucene as lucene
	except :
		raise ImportError, "Install PyLucene module. Visit http://pylucene.osafoundation.org/."

import datetime
import shutil
import tempfile
import time

from django.conf import settings

import core

######################################################################
# Constants

# Map query-language boolean operators to lucene BooleanClause occurrence
# flags. The True/False keys mirror NOT/AND — presumably used by callers
# that pass a "negated?" boolean instead of an operator string; verify
# against the query-building callers.
QUERY_BOOLEANS = {
	"AND"	: lucene.BooleanClause.Occur.MUST,
	"OR"	: lucene.BooleanClause.Occur.SHOULD,
	"NOT"	: lucene.BooleanClause.Occur.MUST_NOT,
	True	: lucene.BooleanClause.Occur.MUST_NOT,
	False	: lucene.BooleanClause.Occur.MUST,
}

# Default-operator values accepted by lucene.QueryParser.setDefaultOperator.
QUERY_OPERATORS = {
	"AND": lucene.QueryParser.Operator.AND,
	"OR" : lucene.QueryParser.Operator.OR,
}

# Largest signed 32bit integer; used as the BooleanQuery max clause count.
MAXINT = int(2**31-1)


def initialize_vm () :
	"""Attach the current thread to the running lucene JVM."""
	vm = lucene.getVMEnv()
	vm.attachCurrentThread()

def deinitialize_vm () :
	"""Detach the current thread from the running lucene JVM."""
	vm = lucene.getVMEnv()
	vm.detachCurrentThread()

class __LUCENE__ (object) :

	def __init__ (self, storage_path=None, storage_type=None, ) :
		self.default_storage_path = settings.SEARCH_STORAGE_PATH
		self.default_storage_type = settings.SEARCH_STORAGE_TYPE

		self.storage_path = storage_path
		self.storage_type = storage_type

		if not self.storage_path :
			self.storage_path = self.default_storage_path

		if not self.storage_type :
			self.storage_type = self.default_storage_type

		self.storage = self.open_storage(
			self.storage_path,
			storage_type=storage_type,
		)
		self.writer = None
		self.reader = None
		self.searcher = None
		self.storage_tmp = None
		self.storage_path_tmp = None

	def get_analyzer (self) :
		return lucene.CJKAnalyzer()

	def is_locked (self) :
		return lucene.IndexReader.isLocked(self.default_storage_path)

	##################################################
	# Open & close
	def close (self) :
		self.close_writer()
		self.close_reader()
		self.close_searcher()
		self.close_storage()

	def open_storage (self, storage_path=None, storage_type=None, ) :
		if not storage_path :
			storage_path = self.storage_path

		if not storage_type :
			storage_type = self.storage_type

		return self.__open_storage(self.storage_path, storage_type, )

	def __open_storage (self, storage_path, storage_type, ) :
		if settings.DEBUG > 1 :
			print "[II] open storage: %s, %s" % (
				storage_type,
				self.storage_path,
			)

		if storage_type == "ram" :
			d = lucene.RAMDirectory()
			writer = self.open_writer(create=True, storage=d)
			writer.close()
			return d
		else :
			return lucene.FSDirectory.getDirectory(storage_path, False,)

	def close_storage (self) :
		if self.storage :
			self.storage.close()
			self.storage = None

	def open_searcher (self) :
		if self.searcher :
			self.searcher.close()

		return lucene.IndexSearcher(self.storage)

	def close_searcher (self) :
		if self.searcher :
			self.searcher.close()
			self.searcher = None

	def open_writer (self, create=False, storage=None, ) :
		return lucene.IndexWriter(
			storage and storage or self.storage,
			self.get_analyzer(),
			create,
		)

	def close_writer (self) :
		if self.writer :
			self.writer.close()

			if self.storage_tmp : # merge temp storage
				while self.is_locked() :
					time.sleep(0.2)

				self.writer = self.open_writer()

				print ">> tmp,", self.storage_tmp
				print ">> org,", self.writer.getDirectory()

				self.writer.addIndexes([self.storage_tmp, ])
				if self.optimize_in_every_add_index :
					self.writer.optimize()
				self.writer.close()

				shutil.rmtree(self.storage_path_tmp)
				self.storage_tmp = None
				self.storage_path_tmp = None

			self.wrtier = None

	def open_reader (self) :
		return lucene.IndexReader.open(self.storage)

	def close_reader (self) :
		if self.reader :
			self.reader.close()
			self.reader = None

class Indexer (__LUCENE__) :
	"""Maintenance entry points for the index: wipe and optimize."""

	def _wait_until_unlocked (self) :
		# poll until no other process holds the index lock.
		while self.is_locked() :
			time.sleep(0.2)

	def clean (self) :
		"""Re-create the index storage, discarding every document."""
		self._wait_until_unlocked()
		self.writer = self.open_writer(create=True)
		self.close_writer()
		return self

	def optimize (self) :
		"""Compact the index segments in place."""
		self._wait_until_unlocked()
		self.writer = self.open_writer()
		self.writer.optimize()
		self.close_writer()
		return self

class IndexWriter (__LUCENE__) :
	"""Writer interface: add, update and remove documents in the index."""

	# when True, close_writer() optimizes after merging a temporary storage.
	optimize_in_every_add_index = False

	def __init__ (self,
				storage_path=None,
				storage_type=None,
				optimize_in_every_add_index=False,
			) :
		super(IndexWriter, self).__init__(
			storage_path=storage_path, storage_type=storage_type)

		self.optimize_in_every_add_index = optimize_in_every_add_index

	def unindex_by_term (self, term) :
		"""Delete every document matching `term`; returns self."""
		self.writer = self.open_writer()
		self.writer.deleteDocuments(term)

		if self.writer.hasDeletions() :
			#print "removed"
			pass
		else:
			#print "un-removed"
			pass

		self.close_writer()

		return self

	unindex = unindex_by_term

	def create_document_from_object (self, obj) :
		"""Build a lucene Document from a model instance `obj`.

		Field definitions come from core.Model.get_info(obj); every value
		is converted with core.DocumentValue.to_index() before indexing.
		"""
		info = core.Model.get_info(obj)

		doc = lucene.Document()
		for f, i in info.get("fields").items() :
			# skip fields the instance does not actually carry.
			if not hasattr(obj, i.get("attrname")) : continue

			v = core.DocumentValue.to_index(
					i.get("type"),
					getattr(obj, i["attrname"]),
					delimeter=i.get("delimeter"),
					flatten=i.get("flatten", False),
			)

			if i.get("flatten", False) :
				# flattened: index the converted value as a single field.
				doc.add(Field.new(
					i["name"],
					v,
					i.get("store", False),
					i.get("tokenize", False),
				))
			else :
				# non-flattened: v is iterable here — one field per
				# non-blank value.
				for v0 in v :
					if not v0.strip() : continue
					doc.add(Field.new(
						i["name"],
						v0,
						i.get("store", False),
						i.get("tokenize", False),
					))

		# add default field
		doc.add(Field.new(core.FIELD_NAME_UID, str(core.Model.get_uid(obj, obj.pk)), True, False,))
		doc.add(Field.new(core.FIELD_NAME_PK, str(obj.pk), True, False,))
		doc.add(Field.new(core.FIELD_NAME_MODEL, core.Model.get_name(obj), True, False,))

		return doc

	def index (self, doc, uid=None) :
		"""Add `doc` to the index, or update in place when `uid` is given.

		If the main index is locked, the document is written into a
		freshly created temporary storage under the default storage path;
		close_writer() (via close()) later merges it back.
		"""
		# check whether index db is locked.
		if not self.is_locked() :
			storage = self.storage
			create = False
		else :
			create = True
			# open temp storage
			self.storage_path_tmp = tempfile.mkdtemp(
				"",
				"storage-",
				self.default_storage_path,
			)
			self.storage_tmp = self.open_storage(self.storage_path_tmp)
			storage = self.storage_tmp

			print ">> Open tmp storage, %s" % self.storage_path_tmp

		self.writer = self.open_writer(create=create, storage=storage)
		if uid :
			# replace the existing document carrying this uid.
			self.writer.updateDocument(
				Term.new(core.FIELD_NAME_UID, uid, ),
				doc,
			)
		else :
			self.writer.addDocument(doc, )

		# NOTE(review): closes the writer directly instead of calling
		# close_writer(), so any temporary storage is only merged by a
		# later close() call — confirm this ordering is intended.
		self.writer.close()

		return self

def index_object (obj) :
	try :
		w = IndexWriter()
		w.index(w.create_document_from_object(obj), )
		w.close()
	except Exception, e :
		print e
		return False

	return True

def index_update_object (obj) :
	try :
		w = IndexWriter()
		w.index(
			w.create_document_from_object(obj),
			uid=str(core.Model.get_uid(obj, obj.pk)),
		)
		w.close()
	except Exception, e :
		print e
		return False

	return True

def unindex_object (obj) :
	try :
		w = IndexWriter()
		w.unindex(
			Term.new(core.FIELD_NAME_UID, str(core.Model.get_uid(obj, obj.pk)))
		)
		w.close()
	except Exception, e :
		print e
		return False

	return True

class Searcher (__LUCENE__) :
	def __init__ (self, storage_path=None, storage_type=None, ) :
		super(Searcher, self).__init__(
			storage_path=storage_path,
			storage_type=storage_type,
		)

	def get_document_by_uid (self, uid) :
		query = BooleanQuery()
		query.add(
			lucene.TermQuery(
				Term.new(core.FIELD_NAME_UID, uid)
			),
			QUERY_BOOLEANS.get("AND"),
		)

		try :
			return list(self.search(query))[0]
		except :
			return None

	def get_hits (self, query, sort=lucene.Sort.RELEVANCE, slice=None) :
		_open_searcher = False
		if self.searcher is None :
			_open_searcher = True
			self.searcher = self.open_searcher()

		try :
			hits = self.searcher.search(query, sort)
		except SystemError :
			hits = self.searcher.search(query, lucene.Sort.RELEVANCE)

		if _open_searcher :
			self.close_searcher()

		return hits

	def search (self, query, sort=lucene.Sort.RELEVANCE, slice=None) :
		self.searcher = self.open_searcher()

		hits = self.get_hits(query, sort=sort, slice=slice, )
		if settings.DEBUG > 1 :
			print "\t", hits.length(), "[II] ", query

		n = 0
		hits_iterator = hits.iterator()
		while True :
			if not hits_iterator.hasNext() :
				break

			hit = hits_iterator.next()
			if slice and slice.start and n < slice.start :
				n += 1
				continue

			if slice and slice.stop and n >= slice.stop :
				break

			hit = lucene.Hit.cast_(hit)
			try:
				yield (hit, hit.getDocument(), )
			except lucene.JavaError :
				break

			n += 1

		self.close_searcher()

class Reader (__LUCENE__) :
	"""Read-only access to index statistics and metadata."""

	def __init__ (self, storage_path=None, storage_type=None, ) :
		super(Reader, self).__init__(
			storage_path=storage_path,
			storage_type=storage_type,
		)

		self.__last_modified_time = None  # index mtime observed when the cache was filled
		self.num_docs_cache = None        # cached numDocs() result

	def numDocs (self) :
		"""Return the number of documents, cached until the index changes."""
		__last = self.last_modified_time()
		# bugfix: the cache is valid only while the index has NOT been
		# modified since it was filled. The original used `>=`, which
		# returned the stale cached count exactly when the index changed.
		if self.num_docs_cache and self.__last_modified_time and __last <= self.__last_modified_time :
			return self.num_docs_cache
		else :
			reader = self.open_reader()
			self.__last_modified_time = __last

			self.num_docs_cache = reader.numDocs()
			reader.close()

		return self.num_docs_cache

	def get_version (self) :
		return lucene.IndexReader.getCurrentVersion(self.storage)

	def is_optimized (self) :
		reader = self.open_reader()
		try :
			return reader.isOptimized()
		finally :
			# bugfix: the reader was previously leaked (never closed).
			reader.close()

	def last_modified_time (self) :
		"""Return the index last-modified time as a naive local datetime.

		Requires the module-level `import datetime`; the original file
		used `datetime` without importing it (NameError at runtime).
		"""
		# lucene.IndexReader.lastModified reports milliseconds since epoch.
		return datetime.datetime.fromtimestamp(
			lucene.IndexReader.lastModified(self.storage) / 1000
		)

class Term (object) :
	"""Factory for lucene.Term instances."""

	@classmethod
	def new (cls, field_name, v) :
		"""Build a term on `field_name` with text `v`."""
		return lucene.Term(field_name, v, )

class Field (object) :
	"""Factory for lucene.Field instances with store/tokenize flags."""

	@classmethod
	def new (cls, field_name, value, store=False, tokenize=False, ) :
		"""Build a field; `store` keeps the raw value, `tokenize` analyzes it."""
		if store :
			store_flag = lucene.Field.Store.YES
		else :
			store_flag = lucene.Field.Store.NO

		if tokenize :
			index_flag = lucene.Field.Index.TOKENIZED
			vector_flag = lucene.Field.TermVector.WITH_POSITIONS_OFFSETS
		else :
			index_flag = lucene.Field.Index.UN_TOKENIZED
			vector_flag = lucene.Field.TermVector.NO

		return lucene.Field(field_name, value, store_flag, index_flag, vector_flag, )

class BooleanQuery (lucene.BooleanQuery) :
	"""BooleanQuery whose clause limit is raised to the 32-bit maximum."""

	def __init__ (self) :
		super(BooleanQuery, self).__init__()
		# lift the default clause-count limit so very large queries work.
		self.setMaxClauseCount(MAXINT)

class TermQuery (lucene.PhraseQuery) :
	"""PhraseQuery built from the whitespace-split words of a single Term."""

	def __init__ (self, term=None) :
		super(TermQuery, self).__init__()
		if isinstance(term, lucene.Term) :
			field = term.field()
			for word in term.text().split() :
				self.add(Term.new(field, word))

class Query (lucene.Query) :
	"""Helpers for building lucene queries from user query strings."""

	@classmethod
	def parse (cls, query_string) :
		"""Parse `query_string` with StandardAnalyzer, defaulting to AND."""
		parser = lucene.QueryParser("", lucene.StandardAnalyzer())
		parser.setDefaultOperator(QUERY_OPERATORS.get("AND"))

		# FIXME:it does not work.
		parser.setLowercaseExpandedTerms(False)

		return parser.parse(query_string)


"""
Description
-----------


ChangeLog
---------


Usage
-----


"""

# Module metadata.
__author__ =  "Spike^ekipS <spikeekips@gmail.com>"
__version__=  "0.1"
__nonsense__ = ""






