# -*- coding: utf-8 -*-
#	Copyright 2005,2006,2007,2008 Spike^ekipS <spikeekips@gmail.com>
#
#	   This program is free software; you can redistribute it and/or modify
#	it under the terms of the GNU General Public License as published by
#	the Free Software Foundation; either version 2 of the License, or
#	(at your option) any later version.
#
#	This program is distributed in the hope that it will be useful,
#	but WITHOUT ANY WARRANTY; without even the implied warranty of
#	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#	GNU General Public License for more details.
#
#	You should have received a copy of the GNU General Public License
#	along with this program; if not, write to the Free Software
#	Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

import os, datetime, glob, time, tempfile, shutil
import lucene

from django.dispatch import dispatcher
from django.db.models import signals
from django.conf import settings
from django.db.models import options as options_django, fields as fields_django

import core

# Map boolean-operator names to lucene BooleanClause occurrence constants.
# The False/True keys mirror MUST/MUST_NOT -- presumably a "negate this
# clause" flag used by callers; confirm against query-building call sites.
QUERY_BOOLEANS = {
	"AND"	: lucene.BooleanClause.Occur.MUST,
	"OR"	: lucene.BooleanClause.Occur.SHOULD,
	"NOT"	: lucene.BooleanClause.Occur.MUST_NOT,
	False	: lucene.BooleanClause.Occur.MUST,
	True	: lucene.BooleanClause.Occur.MUST_NOT,
}

# Map default-operator names to lucene QueryParser operator constants.
QUERY_OPERATORS = {
	"AND": lucene.QueryParser.Operator.AND,
	"OR" : lucene.QueryParser.Operator.OR,
}

# Largest 32-bit signed integer; used as the BooleanQuery clause-count cap.
MAXINT = int(2**31-1)

######################################################################
# Django model fields grouped by how they are indexed in lucene
# (see FIELDS_TYPE below for the per-group store/tokenize settings).

# indexed as plain strings
FIELDS_STR = (
	fields_django.OrderingField,
	fields_django.TextField,
	fields_django.CharField,
	fields_django.NullBooleanField,
	fields_django.SlugField,
	fields_django.XMLField,
)

# indexed as numbers
FIELDS_INT = (
	fields_django.SmallIntegerField,
	fields_django.PositiveSmallIntegerField,
	fields_django.PositiveIntegerField,
	fields_django.IntegerField,
	fields_django.FloatField,
	fields_django.AutoField,
	fields_django.BooleanField,
	fields_django.DecimalField,
)

# structured/meta values (tokenized)
FIELDS_META = (
	fields_django.USStateField,
	fields_django.CommaSeparatedIntegerField,
	fields_django.EmailField,
	fields_django.IPAddressField,
	fields_django.PhoneNumberField,
)

# date/time values (stored un-tokenized)
FIELDS_DATE = (
	fields_django.TimeField,
	fields_django.DateField,
	fields_django.DateTimeField,
)
# path-like values, indexed as keywords (not tokenized)
FIELDS_MULTI_KEYWORD = (
	fields_django.FileField,
	fields_django.FilePathField,
	fields_django.ImageField,
	fields_django.URLField,
)

# Default store/tokenize behavior for each logical field type above.
FIELDS_TYPE = {
	"str": {
		"store": True,
		"tokenize": True,
	},
	"int": {
		"store": True,
		"tokenize": True,
	},
	"keyword": {
		"store": True,
		"tokenize": False,
	},
	"date": {
		"store": True,
		"tokenize": False,
	},
	"meta": {
		"store": True,
		"tokenize": True,
	},
	"multi-keyword": {
		"store": True,
		"tokenize": False,
	},
}

def initialize_vm () :
	"""Attach the calling thread to the already-running Java VM."""
	env = lucene.getVMEnv()
	env.attachCurrentThread()

def deinitialize_vm () :
	"""Detach the calling thread from the Java VM."""
	env = lucene.getVMEnv()
	env.detachCurrentThread()

def create_term (field_name, v) :
	"""Build a lucene.Term for the given field name and value."""
	return lucene.Term(field_name, v)

def parse_query (query) :
	"""Parse a raw query string into a lucene Query.

	The default operator between terms is AND.
	"""
	parser = lucene.QueryParser("", lucene.StandardAnalyzer())
	parser.setDefaultOperator(lucene.QueryParser.Operator.AND)

	# FIXME: this call does not take effect -- expanded terms still get
	# lowercased.
	parser.setLowercaseExpandedTerms(False)

	return parser.parse(query)

def get_terms_from_query (query) :
	"""Return ``{field_name: [term_text, ...]}`` for every term in ``query``.

	The query string is parsed first (see parse_query) and the terms are
	extracted from the resulting lucene Query object.
	"""
	q = parse_query(query)

	h = lucene.HashSet()
	q.extractTerms(h)

	terms = dict()
	for t in h :
		t = lucene.Term.cast_(t)
		# dict.has_key() is deprecated; setdefault covers the
		# "first term for this field" and "append" cases in one step.
		terms.setdefault(t.field(), list()).append(t.text())

	return terms

class BooleanQuery (lucene.BooleanQuery) :
	"""BooleanQuery whose clause-count limit is raised to MAXINT."""
	def __init__ (self) :
		super(BooleanQuery, self).__init__()
		self.setMaxClauseCount(MAXINT)

class TermQuery (lucene.PhraseQuery) :
	"""PhraseQuery built from a single Term.

	The term's text is split on whitespace, so a multi-word value is
	matched as a phrase.  Non-Term (or missing) arguments produce an
	empty query.
	"""
	def __init__ (self, term=None) :
		super(TermQuery, self).__init__()
		if isinstance(term, lucene.Term) :
			field_name = term.field()
			for word in term.text().split() :
				self.add(create_term(field_name, word))

class Enum2List (list) :
	"""Drain an enumeration-like object ``e`` into a plain list.

	``e`` is expected to expose a ``next()`` method that raises
	StopIteration when exhausted (e.g. a JCC-wrapped java iterator).
	"""
	def __init__ (self, e) :
		super(Enum2List, self).__init__()

		self.e = e
		# Pull items one at a time; a plain ``for`` loop is not used
		# because ``e`` may only support the bare next()/StopIteration
		# protocol, not full iteration.
		while True :
			try :
				self.append(self.e.next())
			except StopIteration :
				break

######################################################################
# indexing & searching
class LuceneCore (object) :
	"""Base class managing the lucene storage/writer/reader/searcher
	lifecycle.

	The index lives in ``settings.SEARCH_STORAGE_PATH``.  When the main
	storage is write-locked by another writer, ``open_writer`` can fall
	back to a temporary per-instance directory which ``close_writer``
	merges back into the main storage.
	"""

	searcher = None
	reader = None
	writer = None
	storage_path = settings.SEARCH_STORAGE_PATH
	using_temp_storage = False
	optimize_in_every_add_index = False

	def __init__ (
				self,
				optimize_in_every_add_index=False,
			) :
		self.storage = self.open_storage()
		self.optimize_in_every_add_index = optimize_in_every_add_index

	def __del__ (self) :
		# best-effort cleanup; a destructor must never raise.
		try : self.close()
		except : pass

	def get_analyzer(self) :
		"""Return the analyzer used for both indexing and querying."""
		#return lucene.StandardAnalyzer()
		return lucene.CJKAnalyzer()

	def is_locked (self) :
		"""Return True when the main index storage is write-locked."""
		self.open_reader()
		if self.reader is None :
			# no readable index yet, so nothing can hold the lock.
			return False

		b = self.reader.isLocked(self.open_main_storage())
		self.close_reader()
		return b

	def check_db (self, ) :
		# classmethod (bound below): True when an index exists on disk.
		# ``self`` is actually the class here; name kept for history.
		return os.path.isfile(
			os.path.join(self.storage_path, "segments.gen")
		)

	check_db = classmethod(check_db)

	def open_writer (self, create=False, check_lock=False, ) :
		"""Open ``self.writer`` on the current storage.

		create:     force (re-)creation, truncating any existing index.
		check_lock: when the main storage is locked, switch to a
		            temporary storage directory first.
		"""
		if self.writer :
			self.writer.close()

		if check_lock and self.is_locked() :
			self.storage = self.open_temp_storage()

		created = os.path.isfile(
			os.path.join(self.storage_path, "segments.gen")
		)
		# (re)create the index when explicitly asked, or when no index
		# exists on disk yet.  (Was ``create and True or not created``,
		# which is equivalent for boolean ``create``.)
		self.writer = lucene.IndexWriter(
			self.storage,
			self.get_analyzer(),
			create or not created,
		)

	def open_reader (self) :
		"""Open ``self.reader``; leaves it None when the index is missing."""
		if self.reader :
			self.reader.close()

		try :
			self.reader = lucene.IndexReader.open(self.storage)
		except :
			# no index yet (or unreadable); callers test for None.
			pass

	def open_searcher (self) :
		"""(Re-)open ``self.searcher`` on the current storage."""
		if self.searcher :
			self.searcher.close()

		self.searcher = lucene.IndexSearcher(self.storage)

	def open_temp_storage (self) :
		"""Switch this instance to a fresh temporary storage directory."""
		self.storage_path = tempfile.mkdtemp("", "storage-", settings.SEARCH_STORAGE_PATH)

		self.using_temp_storage = True
		self.close_storage() # re-open storage by new temp directory.
		return self.open_storage()

	def open_main_storage (self) :
		"""Open the main (non-temporary) storage directory."""
		return self.open_storage(settings.SEARCH_STORAGE_PATH)

	def open_storage (self, path=None) :
		"""Return an FSDirectory at ``path`` (default ``self.storage_path``),
		creating the directory when it does not exist yet."""
		if not path :
			path = self.storage_path

		created = os.path.isdir(path)
		if not created :
			# Let a mkdir failure propagate with its original traceback;
			# the old ``except OSError, e : raise OSError, e`` re-raise
			# only discarded the traceback.
			os.mkdir(path)

		return lucene.FSDirectory.getDirectory(path, not created,)

	def close (self) :
		"""Close writer, searcher, reader and storage, in that order."""
		self.close_writer()
		self.close_searcher()
		self.close_reader()
		self.close_storage()

	def close_writer (self) :
		"""Close the writer; when a temp storage was used, merge it back
		into the main storage and delete the temp directory."""
		if self.using_temp_storage : # merge it
			# NOTE(review): ``__main_s`` is never used afterwards and the
			# writer below is opened on ``self.storage`` (the temp
			# directory), so ``addIndexes([self.storage])`` adds the temp
			# storage to itself.  It looks like the writer was meant to
			# target the main storage -- confirm before relying on this
			# merge path.
			__main_s = self.open_main_storage()
			while self.is_locked() :
				time.sleep(0.2)

			self.open_writer(check_lock=False)
			self.writer.addIndexes([self.storage, ])
			if self.optimize_in_every_add_index : self.writer.optimize()

		try: self.writer.close()
		except : pass

		if self.using_temp_storage : # remove current temp storage
			self.storage.close()

			shutil.rmtree(self.storage_path)

		self.writer = None

	def close_searcher (self) :
		try: self.searcher.close()
		except : pass
		self.searcher = None

	def close_reader (self) :
		try: self.reader.close()
		except : pass
		self.reader = None

	def close_storage (self) :
		try : self.storage.close()
		except : pass
		self.storage = None

class Reader (LuceneCore) :
	"""Read-only access to the index with a cached document count."""

	num_docs_cache = None
	__last_modified_time = None # time the cached count was taken at

	def __init__ (self) :
		super(Reader, self).__init__()
		self.open_reader()

	def numDocs (self) :
		"""Return the number of documents in the index.

		The count is cached and only re-read when the index has been
		modified since the last call.  Bug fix: the comparison used to
		be ``>=``, which returned the stale cache exactly when the index
		HAD changed, so the count never refreshed.
		"""
		__last = self.last_modified_time()
		if self.__last_modified_time and __last <= self.__last_modified_time :
			# index unchanged since the cache was taken.
			return self.num_docs_cache

		self.__last_modified_time = __last
		self.num_docs_cache = self.reader.numDocs()
		return self.num_docs_cache

	def get_current_version (self) :
		"""Return the version stamp currently on disk."""
		return self.reader.getCurrentVersion(self.storage)

	def get_version (self) :
		"""Return the version the open reader was opened at."""
		return self.reader.getVersion()

	def is_optimized (self) :
		"""Return True when the index consists of a single segment."""
		return self.reader.isOptimized()

	def last_modified_time (self) :
		"""Return the index's last-modification time as a datetime.

		``lastModified`` is in milliseconds, hence the / 1000.
		"""
		return datetime.datetime.fromtimestamp(
			self.reader.lastModified(self.storage) / 1000
		)
class Searcher (LuceneCore) :

	def get_document_by_uid (self, uid) :
		query = BooleanQuery()
		query.add(lucene.TermQuery(create_term(core.FIELD_NAME_UID, uid)), QUERY_BOOLEANS.get(False))
		try :
			return list(self.search(query))[0]
		except :
			return None

	def get_hits (self, query, sort=lucene.Sort.RELEVANCE, slice=None) :
		_open_searcher = False
		if self.searcher is None :
			_open_searcher = True
			self.open_searcher()

		try :
			hits = self.searcher.search(query, sort)
		except SystemError :
			hits = self.searcher.search(query, lucene.Sort.RELEVANCE)

		if _open_searcher :
			self.close_searcher()

		return hits

	def iterator (self, query, sort=lucene.Sort.RELEVANCE, slice=None) :
		self.open_searcher()

		hits = self.get_hits(query, sort=sort, slice=slice, )
		if settings.DEBUG :
			print
			print hits.length(), "[II] ", query

		n = 0
		hits_iterator = hits.iterator()
		while True :
			if not hits_iterator.hasNext() :
				break

			hit = hits_iterator.next()
			if slice and slice.start and n < slice.start :
				n += 1
				continue

			if slice and slice.stop and n >= slice.stop :
				break

			hit = lucene.Hit.cast_(hit)
			try:
				yield (hit, hit.getDocument(), )
			except lucene.JavaError :
				break

			n += 1

		self.close_searcher()

	search = iterator

class Indexer (LuceneCore) :
	"""Whole-index maintenance: truncation (clean) and optimization."""

	def __init__ (self, optimize=False,) :
		super(Indexer, self).__init__()
		# the ``optimize`` argument used to be accepted and silently
		# dropped; store it for consistency with IndexWriter.
		# (clean()/optimize() do not consult it.)
		self.optimizing = optimize

	def clean (self) :
		"""Re-create the index empty (drops all documents)."""
		# hold a handle on the main storage and wait for the write lock
		# to be released before truncating.
		__main_s = self.open_main_storage()
		while self.is_locked() :
			time.sleep(0.2)

		self.open_writer(create=True)

		self.close_writer()

		return self

	def optimize (self) :
		"""Optimize (merge the segments of) the index."""
		__main_s = self.open_main_storage()
		while self.is_locked() :
			time.sleep(0.2)

		self.open_writer()
		self.writer.optimize()
		self.close_writer()

		return self

class IndexWriter (LuceneCore) :
	"""Adds model instances to, and removes them from, the search index."""

	created = False

	def __init__ (self, optimize=False,) :
		super(IndexWriter, self).__init__()

		self.optimizing = optimize
		self.open_writer(check_lock=True)

	def unindex_by_term (self, term) :
		"""Delete every indexed document matching ``term``.

		(The old dead ``hasDeletions()`` check-and-pass was removed.)
		"""
		self.writer.deleteDocuments(term)
		return self

	def unindex_by_object (self, object) :
		"""Delete the document previously indexed for this instance."""
		return self.unindex_by_term(
			create_term(
				core.FIELD_NAME_UID,
				core.get_object_uid(object),
			)
		)

	unindex = unindex_by_object

	def __index_default_fields (self, object) :
		"""Build the bookkeeping fields every document carries:
		model name, object uid and (when available) primary key."""
		fields = list()

		# store model and uid, un-tokenized so they match exactly.
		fields.append(
			lucene.Field(
				core.FIELD_NAME_MODEL,
				core.get_model_name(object.__class__),
				lucene.Field.Store.YES,
				lucene.Field.Index.UN_TOKENIZED,
			)
		)
		fields.append(
			lucene.Field(
				core.FIELD_NAME_UID,
				core.get_object_uid(object),
				lucene.Field.Store.YES,
				lucene.Field.Index.UN_TOKENIZED,
			)
		)

		if object.pk is not None :
			fields.append(
				lucene.Field(
					core.FIELD_NAME_PK,
					str(object.pk),
					lucene.Field.Store.YES,
					lucene.Field.Index.UN_TOKENIZED,
				)
			)

		return fields

	def index (self, object, update=False, ) :
		"""Index ``object``, replacing any previously indexed version.

		update: accepted for interface compatibility; a previously
		indexed document is always deleted before re-adding.

		Cleanups vs. the old version: the temporary Searcher is closed
		explicitly, the unused ``analyzer`` local, the leftover debug
		``print`` for the "url" field, and the no-op try/finally were
		removed.
		"""
		# delete the previously indexed document, if any.
		searcher = Searcher()
		doc = searcher.get_document_by_uid(core.get_object_uid(object))
		searcher.close() # release the searcher's storage handle promptly
		if doc is not None :
			self.unindex(object)

		attrs = core.MODELS_REGISTERED.get(core.get_model_name(object), None)

		doc = lucene.Document()
		for f in self.__index_default_fields(object) :
			doc.add(f)

		# add one or more lucene fields per registered model field.
		for f, v in attrs.get("fields").items() :
			value = getattr(object, v["field"].name)
			for cf in core.create_field(
					v["field"],
					value,
					store=v.get("store"),
					tokenize=v.get("tokenize"),
					add_sort=v.get("add_sort"),
					analyzer=v.get("analyzer"),
				) :
				doc.add(cf)

		self.writer.addDocument(doc)

		if self.optimizing :
			Indexer().optimize().close()

		return self

"""
Description
-----------


ChangeLog
---------


Usage
-----


"""

__author__ =  "Spike^ekipS <spikeekips@gmail.com>"




