#!/usr/bin/env python

#################################################################################
#
# Copyright (c) 2006 Michigan State University
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
#################################################################################

#################################################################################
#
#	Author:
#		Nathan Collins <npcollins@gmail.com>
#
#################################################################################

from math import log

# the search similarity
class oaiSimilarity(object):
	"""
	Lucene-style similarity used to score search results.

	The score of query q for document d is defined as:

	score(q,d) = SUM[t in q] of

		tf(t in d) * idf(t)^2 * getBoost(t in q) *
		getBoost(t.field in d) * lengthNorm(t.field in d)

		MULTIPLIED BY

		coord(q,d) * queryNorm(sumOfSquaredWeights)

		WHERE

		sumOfSquaredWeights = SUM[t in q] of

			( idf(t) * getBoost(t in q) )^2

	-------------------------------------------------------------
	getBoost sets the boost to that score factor, default is 1.0

	"""
	def lengthNorm(self, field, numTerms):
		"""
		Computes the normalization value for a field given the total number
		of terms contained in a field.

		Matches in longer fields are usually less precise, so implementations
		of this method conventionally return smaller values when numTerms is
		large.  This implementation deliberately deviates from that (see the
		note below): longer fields are given a mildly LARGER factor.
		"""
		# default 1/sqrt(numTerms)

		# this could cause unusually high score based off short metadata
		# due to the way the metadata is searched, length should not be an issue

		# guard: log(0) raises ValueError — treat an empty field as length 1
		# so an empty/missing field cannot crash scoring
		if numTerms < 1:
			numTerms = 1
		return (log(numTerms) + 1) / 2.0

	def queryNorm(self, sumOfSquaredWeights):
		"""
		Computes the normalization value for a query given the sum of the
		squared weights of each of the query terms.

		This does not affect ranking, but rather just attempts to make scores
		from different queries comparable.
		"""
		# default 1/sqrt(sumOfSquaredWeights)

		# catch for searches where all keywords are stopwords or excluded
		if sumOfSquaredWeights == 0: sumOfSquaredWeights = 1
		# ** 0.5 yields a float, so the division is exact in Py2 and Py3
		return 1/(sumOfSquaredWeights**0.5)

	def tf(self, freq):
		"""
		Computes a score factor based on a term or phrase's frequency in a
		document.

		Terms and phrases repeated in a document indicate the topic of the
		document, so implementations of this method usually return larger values
		when freq is large, and smaller values when freq is small.
		"""
		# default sqrt(freq); 0.8 damps repetition slightly less than sqrt
		return freq**0.8

	def sloppyFreq(self, distance):
		"""
		Computes the amount of a sloppy phrase match, based on an edit distance.

		A phrase match with a small edit distance to a document passage more
		closely matches the document, so implementations of this method usually
		return larger values when the edit distance is small and smaller values
		when it is large.
		"""
		# default 1 / (distance + 1)
		# float literal: with integer distance, Python 2's integer division
		# would floor this to 0 for any distance >= 1, zeroing all sloppy
		# phrase matches
		return 1.0 / (distance + 1)


	def idfTerms(self, terms, searcher):
		"""
		Computes a score factor for a collection of terms: the sum of the
		per-term idf factors.  Returns 0.0 for an empty collection.
		"""
		# built-in sum (seeded with 0.0 to preserve the float return);
		# the original used a local named 'sum', shadowing the builtin
		return sum((self.idfTerm(t, searcher) for t in terms), 0.0)

	def idfTerm(self, term, searcher):
		"""
		Computes a score factor for a simple term.

		searcher must provide docFreq(term) and maxDoc().
		"""
		# default self.idf( searcher.docFreq(term), searcher.maxDoc() )
		return self.idf( searcher.docFreq(term), searcher.maxDoc() )

	def idf(self, docFreq, numDocs):
		"""
		Computes a score factor based on a term's document frequency (the
		number of documents which contain the term).

		Terms that occur in fewer documents are better indicators of topic,
		so implementations of this method usually return larger values for
		rare terms, and smaller values for common terms.
		"""
		# default log(numDocs/(docFreq+1)) + 1
		# float() guards against Python 2 integer floor division, which
		# would take log() of a truncated quotient and skew the factor
		return log(float(numDocs)/(docFreq+1)) + 1

	def coord(self, overlap, maxOverlap):
		"""
		Computes a score factor based on the fraction of all query terms that
		a document contains.

		The presence of a large portion of the query terms indicates a better
		match with the query, so implementations of this method usually return
		larger values when the ratio between these parameters is large and
		smaller values when the ratio between them is small.

		This implementation deliberately disables the coordination factor by
		always returning 1.0.
		"""
		# default overlap / maxOverlap

		return 1.0

# eof

