#!/usr/bin/env python

#################################################################################
#
# Copyright (c) 2006 Michigan State University
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
#################################################################################

#################################################################################
#
#	Author:
#		Nathan Collins <npcollins@gmail.com>
#
#################################################################################

"""
Search CGI script. Required CGI for PyLucene due to threading issues between
mod_python and PyLucene.
"""

import utility
import cgi
import termsparse
import fieldmapping
import PyLucene
from datetime import datetime
import urlcheck
import urllib
import searchloop
import recordfunctions
import recordshort
import recordlong
import math
import oaicookie
from oaisimilarity import oaiSimilarity

# dummy outputclass
class dummyoutput:
	def __init__(self):
		self.document = ''

	def write(self, text):
		print text
	
	def finished(self):
		print self.document

# begin execution
def search():
	# psp like vars
	req = dummyoutput()
	form = cgi.FieldStorage()

	### VARIABLES

	# the directory of the lucene index
	lucenedir = '/var/lib/lucene/oais'

	# maximum number of results
	maxresults = 20

	# flag to indicate a go for printing results
	resultsok = 1

	# the raw search string
	searchraw = ''
	if form.has_key('s'): searchraw = form['s'].value

	# the 'within a search' search string
	withinraw = ''
	if form.has_key('w'): withinraw = form['w'].value

	# the location within search results
	searchloc = '1'
	if form.has_key('l'): searchloc = form['l'].value

	# convert location to number, on fail, set to 1
	try: searchloc = int(searchloc)
	except: searchloc = 1

	# should the result(s) be expanded by default
	searchexpand = '0'
	if form.has_key('e'): searchexpand = form['e'].value
	# convert expand on default to number, on fail, set to 0
	try: searchexpand = int(searchexpand)
	except: searchexpand = 0


	# content type
	req.write("""Content-type: text/html; charset=utf-8\n""")

	### HEADERS

	# doctype
	req.write("""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n""")

	req.write("""<html>\n<head>""")

	utility.writefile('html/pg_header.html', req)

	### Javascript files

	# javascript files needed on this page
	jsfiles = ['ajax','tips','cookies','search','history','cookies','rememberwin','tagging']


	# for each js file, output a script tag
	for js in jsfiles:
		utility.writejavascript(js, req)

	# load scripts for non IE users
	req.write("""
	<![if gt IE 7]>
	<script type="text/javascript" src="onload.js"></script>
	<![endif]>

	</head>
	""")

	################################################
	## Search results and all output preparation

	# set the search as valid, flag if parsing fails
	invalidsearch = 0
	# set this flag if a java error is thrown
	javaerror = 0

	mainsearch = ""
	withinsearch = ""

	try:
		# parse and swap fields
		mainsearch, p = termsparse.parseLuceneSearch(searchraw)
		mainsearch = mainsearch.toLucene()
		mainsearch = fieldmapping.querymapalias2actual(mainsearch)
		mainsearch = fieldmapping.querymapdc2actual(mainsearch)

	except:
		invalidsearch = 1

	try:
		# same for within
		withinsearch, p = termsparse.parseLuceneSearch(withinraw)
		withinsearch = withinsearch.toLucene()
		withinsearch = fieldmapping.querymapalias2actual(withinsearch)
		withinsearch = fieldmapping.querymapdc2actual(withinsearch)

	except:
		withinsearch = ''

	# get list of terms for boldification
	termslist, p = termsparse.getLuceneTerms(mainsearch)


	### PARSE QUERY ###
	# the lucene searcher
	searcher = PyLucene.IndexSearcher(lucenedir)

	# set the similarity of the searcher
	searcher.setSimilarity(oaiSimilarity())

	# create lucene query parser, default search field is 'keywords'
	parser = PyLucene.QueryParser("keywords", PyLucene.StandardAnalyzer())

	

	# get the query from the parser
	try:
		# first try the normal query
		query = parser.parse(mainsearch)
		
	except:
		# if the safe terms fail, report invalid search
		invalidsearch = 1

	try:
		# first try normal parsing
		withinquery = parser.parse(withinsearch)

	except:
		# if safe fails, then discard the within query
		withinquery = ''
	
	### PERFORM QUERY ###

	# create lucene boolean query
	boolquery = PyLucene.BooleanQuery()

	# set the maximum clause count, mainly for limiting range searches (default 1024)
	boolquery.setMaxClauseCount(8192)

	# only perform search if valid
	if invalidsearch == 0:
	
		# add search query to boolean query
		boolquery.add(query, PyLucene.BooleanClause.Occur.MUST)

		# if this is a search within a search, add within to boolean query
		if len(withinraw) > 1:
			# adding within search to boolean query
			boolquery.add(withinquery, PyLucene.BooleanClause.Occur.MUST)


	results = []
	duration = "0.00"

	try:

		if invalidsearch == 0:
			# time the lucene search
			startsearch = datetime.now()

			# call search start
			results = searcher.search(boolquery)
			
			# calculate search time
			duration = datetime.now() - startsearch

			# parse duration
			duration = str(duration).split(':')[2][0:6]

	except PyLucene.JavaError:
		# catch java search errors

		#org.apache.lucene.search.BooleanQuery$TooManyClauses

		javaerror = 1


	# total number of results
	resultsnum = len(results)

	# number of the first result on the page
	if resultsnum == 0:
		pagestartnum = 0
	else:
		pagestartnum = searchloc

	# number of the last result on this page
	pageendnum = min(resultsnum, searchloc + maxresults - 1)

	### 'BODY SECTION' ###
	req.write("""<body id='body'>""")

	# output the title
	utility.writefile('html/pg_title_compact.html', req)

	# output the bar under the title
	data = (searchraw, pagestartnum, pageendnum, utility.commas(resultsnum), duration)
	req.write("""
	<![if gt IE 7]>
	<div class="subbar barDark margin2em"></div>
	<div class="subbar barMid margin2em"></div>
	<![endif]>
	<div id="bar1" class="margin2em">
		<span class="smallish">&nbsp;Your search was 
		<b>%s</b>.
		Viewing %s - %s of a total
		%s results in
		%s seconds.</span>
	</div>
	""" % data)

	# output the search box and history pop-up link
	utility.writefile('html/pg_search_results.html', req, (utility.aposescape(searchraw), utility.aposescape(withinraw)))

	# output the history floater
	req.write("""<div id="historyFloat">Failed to load history.</div>""" )

	# output the tab window
	req.write("""<![if gt IE 7]>
		<div id='remembermaster'>
		<div id='rememberwin' class='f_left'>
		<br>&nbsp;Loading...<br>
		</div>%s</div>
		<![endif]>""" % oaicookie.printTabs(1) )

	### 'RESULTS' ###
	req.write("""
	<![if gt IE 7]>
	<div class="results">
	<![endif]>
	<!--[if lte IE 7]>
	<div class="resultsIE">
	<![endif]-->
	""")

	# check to see if search was invalid
	if invalidsearch == 1:
		# if so, print invalid search message
		utility.writefile('html/pg_search_invalid.html', req)
	# end invalid check

	# check if a java error was thrown earlier
	if javaerror == 1:
		# output search error
		utility.writefile('html/pg_search_javaerror.html', req)
	# end java error check

	# check to see if the results request is acceptable (first 1000 results)
	if searchloc + maxresults > 1000:
		# has browsed too far
		utility.writefile('html/pg_search_over.html', req)
		# no not print results
		resultsok = 0
	# end location check

	### RECORD OUTPUT ###

	# page result number
	pageresultnum = 0

	# if nothing has happened to stop results from printing
	if resultsok == 1:
		# get the list of results
		resultlist = searchloop.searchloop(results, pagestartnum, pageendnum)

		# loop and print results
		for result in resultlist:
			# output the record

			req.write("""<div class="margin1em"><div class="resultShad">""")

			# check to see if there is a valid url for record
			validurl = urlcheck.checkUrl(result,1)

			# if no url is found
			if validurl == '':
				# print the no-url-found background
				req.write('<div id="res%s" class="resultItem badurl">' % pageresultnum)
			# otherwise, url is found
			else:
				# print normally
				req.write('<div id="res%s" class="resultItem">' % pageresultnum)

			# load up variables for passing to output
			recordvars = {}
			recordvars['validurl'] = validurl
			recordvars['boolquerystring'] = boolquery.toString()
			recordvars['searchraw'] = searchraw
			recordvars['pageresultnum'] = pageresultnum
			recordvars['termslist'] = termslist

			# check whether the expand flag is set
			if searchexpand == 1:
				# print expanded output
				recordlong.outlong(result, recordvars, req)

			# flag isn't set
			else:
				# print a shortened record output
				recordshort.outshort(result, recordvars, req)
		
			# end record output

			req.write("""</div></div></div>""")

			# increment the page result
			pageresultnum += 1

		# end result loop

	# end results if
	req.write("""</div>""")

	### 'FOOTER SECTION' ###

	# max links on either side of current page
	maxlinks = 8

	### TODO: cleanup this mess
	pages = math.ceil(min(1000, resultsnum) / (maxresults * 1.0))
	if (pages - 1) * maxresults < resultsnum:
		pages += 1

	if int(pages) > 1:
		req.write("""<br><div class="pages">Page: &nbsp;&nbsp;""")

	for p in range(0, int(pages) - 1):
		if searchloc >= p*maxresults +1 and searchloc < (p+1)*maxresults:
			req.write( p+1 )
		elif searchloc >= (p-maxlinks)*maxresults +1 and searchloc < (p+1+maxlinks)*maxresults:
			req.write("""<a href="search.py?l=%s&s=%s&w=%s">%s</a>""" % (p*maxresults+1, urllib.quote_plus(searchraw), urllib.quote_plus(withinraw), p+1))

	req.write("""</div>""")

	utility.writefile('html/pg_footer_advanced.html', req)

	req.write("""</body></html>""")

	# for .py only
	req.finished()


# script entry point: render the search page when run as a CGI
if __name__ == "__main__":
	search()

# eof

