#!/usr/bin/env python

#################################################################################
#
# Copyright (c) 2006 Michigan State University
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
#################################################################################

#################################################################################
#
#	Author:
#		Nathan Collins <npcollins@gmail.com>
#
#################################################################################

import sys
import re
import types, codecs
from PyLucene import IndexWriter, StandardAnalyzer, Document, Field
from querymgr import *
from utility import *
from hexHashIter import *
from oaivars import *

# filesystem location of the Lucene index; g is the shared config
# mapping provided by one of the star imports above (presumably
# oaivars) -- TODO confirm which module defines it
lucene_dir = g['LUCDIR']


# indexing class
class lucindex:
	"""Indexes harvested OAI records into Lucene.

	Records are pulled from the harvestSearch table in batches, one
	batch per 4-character hex prefix of recHash; each row is turned
	into a Lucene Document and handed to the supplied IndexWriter.
	"""

	def __init__(self, writer):
		# Lucene IndexWriter that receives the finished documents
		self.writer = writer

		# iterates over every 4-character hex string; each value is
		# used as a recHash prefix selecting one batch of records
		self.hexHash = hexHashIter(4)
		self.curHash = self.hexHash.getString()

	def process_batch(self):
		"""Index one batch of records (one recHash hex prefix).

		Returns hexHash.increment(): non-zero while there are more
		prefixes left, so callers loop until this returns 0.
		"""
		self.curHash = self.hexHash.getString()

		# prepare query string for pulling records from harvest table;
		# curHash is internally generated hex, so the interpolation
		# here cannot inject
		self.query = "SELECT hDatestamp,iDatestamp,mTitle,hDeleted," + \
			"mPublisher,mDescription,mDate,mSubject,mContributor," + \
			"mCreator,mType,mFormat,mIdentifier,mSource,mLanguage," + \
			"mRelation,mCoverage,mRights,hIdentifier,recHash,iContact," + \
			"iRepository,iOAIURL,iHandleDelete,iProtocolVer, cLanguage, " + \
			"cValidUrl, iMetadataDirect " + \
			"FROM harvestSearch " + \
			"WHERE recHash LIKE '%s%%';" % self.curHash

		# retrieve block of records from harvest table
		results = dbquery(self.query)

		# empty batch: just advance to the next prefix
		if results is None:
			return self.hexHash.increment()

		# build and queue one document per row
		for result in results:
			self.writer.addDocument(self.add_rec(result))

		# returns non-zero while not finished
		return self.hexHash.increment()

	def _as_unicode(self, value):
		# coerce a database value to unicode, silently dropping any
		# bytes that are not valid UTF-8 (same conversion the stored
		# field loop has always applied)
		if type(value) != types.UnicodeType:
			value = codecs.utf_8_decode(str(value), 'ignore')[0]
		return value

	def add_rec(self, rows):
		"""Convert one harvestSearch row (column -> value mapping)
		into a Lucene Document and return it."""
		doc = Document()

		# add fields to doc
		for k in rows:
			if k == 'recHash' or k == 'hDatestamp':
				# exact-match fields: stored but not tokenized
				doc.add(Field(k, rows[k],
					Field.Store.YES,
					Field.Index.UN_TOKENIZED))
			else:
				# everything else is stored and tokenized; coerce
				# non-unicode values first
				doc.add(Field(k, self._as_unicode(rows[k]),
					Field.Store.YES,
					Field.Index.TOKENIZED))

		# user-contributed keywords for this record
		userMod = self.getUserModString(rows['recHash'])

		# combined free-text search field; skip NULL columns, which
		# the previous raw ' '.join over row values crashed on, and
		# coerce every piece so the join result is clean unicode
		parts = [self._as_unicode(rows[col])
			for col in ('mTitle', 'mDescription', 'mCreator', 'mSubject')
			if rows[col] is not None]
		parts.append(self._as_unicode(userMod))
		keywords = ' '.join(parts)

		# keywords are searchable but need not be stored
		doc.add(Field('keywords', keywords,
			Field.Store.NO,
			Field.Index.TOKENIZED))

		# return the document
		return doc

	def getUserModString(self, rechash):
		"""Return user-submitted keywords for a record as a single
		space-prefixed string ('' when there are none).

		Only user data newer than `yearsback` years is included.
		"""
		# how many years back to use user data
		yearsback = 3
		daysback = 365 * yearsback

		# NOTE(review): SQL built by string concatenation; rechash
		# originates from harvested records, so if dbquery ever grows
		# a parameterized form this should use it
		umQuery = "SELECT keywordSearch FROM harvestUserMod " + \
			"WHERE recHash = '" + rechash + "' AND " + \
			"DATEDIFF(CURDATE(),stamp) < " + str(daysback) + ";"

		results = dbquery(umQuery)

		# if there are no results, indicate and return
		if results is None:
			return ''

		# concatenate every keywordSearch value, space-separated
		# (leading space matches the original accumulator behavior)
		return ''.join([" " + result['keywordSearch'] for result in results])


# the main class
class main:
	"""Entry point: (re)creates the Lucene index and indexes every
	harvested record, showing a dot-per-batch progress indicator."""

	def __init__(self):
		# directory of index data (module-level constant; no `global`
		# declaration needed for a read-only access)
		self.index_dir = lucene_dir

		# progress indicator
		self.ind = progress('Indexing records', 'Finished', 32)

		# begin indicator
		self.ind.begin()

		# connect, index everything, then clean up
		self.connect()
		self.index_all(self.ind)
		self.disconnect()

		# indicate finished
		self.ind.done()

	def connect(self):
		"""Instantiate the Lucene IndexWriter, recreating the index."""
		# True: delete any existing index and start fresh
		recreate_index = True

		# writer class
		self.writer = IndexWriter(self.index_dir, StandardAnalyzer(), recreate_index)

		# cap documents per merged segment
		self.writer.setMaxMergeDocs(500000)

	def index_all(self, ind):
		"""Index all record batches, emitting one progress dot each.

		ind: progress indicator with a dot() method.
		"""
		luc = lucindex(self.writer)

		# process_batch() returns 0 once every hash prefix is done
		while luc.process_batch() > 0:
			ind.dot()

	def disconnect(self):
		"""Optimize and close the index writer."""
		try:
			self.writer.optimize()
		except Exception:
			# best-effort: an un-optimized index is still usable
			print 'Error: Unable to optimize.'
		self.writer.close()

	# backward-compatible alias for the original misspelled name
	disconnct = disconnect

# file is being executed directly (not imported): run the indexer
if __name__ == '__main__':
	main()

# eof

