#!/usr/bin/python
#
# Copyright (c) 2009 Julius Volz
# See LICENSE for details.

import math, rdflib, urllib2, xapian
from urllib2 import urlparse
import conv, debug, function
from febrl import stringcmp as strcmp


class Error(function.Error):
  """Used to signal errors during metric evaluation.

  Also raised for metric registration and lookup failures (see get()
  and register() below).
  """

# Registry of every known metric, keyed by its string identifier.
metrics = {}

def get(identifier):
  """Return the metric registered under identifier.

  Raises:
    Error: if no metric with that identifier has been registered.
  """
  try:
    return metrics[identifier]
  except KeyError:
    raise Error('Metric "%s" not defined!' % identifier)

def register(*args):
  """Register one or more Metric instances in the global function registry
  and the metric registry.

  Raises:
    Error: if an argument is not a Metric, or its identifier is already
      registered.
  """
  for metric in args:
    # Validate before touching any registry: the original called
    # function.register() first and validated afterwards, so a bad
    # argument could leave a half-registered metric behind.
    if not isinstance(metric, Metric):
      raise Error('Trying to register non-metric "%s" as metric!' % metric)
    if metric.identifier in metrics:
      raise Error('Metric "%s" registered twice!' % metric.identifier)

    function.register(metric)
    metrics[metric.identifier] = metric

def unregister_user_metrics():
  """Remove every UserMetric from the metric and function registries."""
  # Collect identifiers first; deleting while iterating the dict is unsafe.
  user_ids = [identifier for identifier, metric in metrics.items()
              if isinstance(metric, UserMetric)]

  for identifier in user_ids:
    del metrics[identifier]
    function.unregister(identifier)
 

class MetricAggregation(object):
  """Aggregates the values of several weighted sub-metrics into one score.

  Supported aggregation types:
    AVG     - weighted arithmetic mean of the sub-metric values
    MAX     - maximum sub-metric value
    MIN     - minimum sub-metric value
    EUCLID  - 1 - normalized Euclidean distance of the (1 - value) components
    PRODUCT - 1 - product of the (1 - value) components
  """

  AGGREGATION_TYPES = [
    'AVG',
    'MAX',
    'MIN',
    'EUCLID',
    'PRODUCT'
  ]

  def __init__(self, aggr_type, metrics):
    """aggr_type: one of AGGREGATION_TYPES. metrics: list of
    (metric, options) pairs where options carries 'weight', 'optional'
    and 'default'.

    Raises:
      Error: if aggr_type is not a known aggregation type.
    """
    if aggr_type not in self.AGGREGATION_TYPES:
      # Fixed: the original raised a plain string, which is illegal since
      # Python 2.6 and would surface as a TypeError instead.
      raise Error('Invalid aggregation type "%s"!' % aggr_type)
    self.aggr_type = aggr_type
    self.metrics = metrics

  def evaluate(self, bindings, trace=None):
    """Evaluate all sub-metrics on bindings and combine their values.

    Returns None when no sub-metric produced a value, or when a
    non-optional sub-metric without a default produced none. If trace is
    a dict, per-metric evaluation details are recorded into it.
    """
    results = []
    total_weights = 0
    debug.log_indent(5, 'Entering metric aggregation\n')

    metrics_trace = None
    if trace is not None:
      trace.update({
        'obj': self,
        'metrics': []
      })
      metrics_trace = trace['metrics']

    for metric, options in self.metrics:
      metric_trace = None
      if trace is not None:
        metric_trace = {
          'weight': options['weight'],
          'optional': options['optional'],
          'default': options['default']
        }
        metrics_trace.append(metric_trace)

      value = metric.evaluate(bindings, metric_trace)

      if trace is not None:
        metric_trace['value'] = value

      debug.log(5, "value = %s\n" % (value))

      # Substitute the configured default for a missing value.
      if value is None and options['default'] is not None:
        value = options['default']

      if value is None:
        if not options['optional']:
          # A required sub-metric failed: the whole aggregation fails.
          debug.log_outdent(5, 'Exiting metric aggregation\n')
          return None
        else:
          continue
      else:
        value = float(value)
        if self.aggr_type == 'AVG':
          results.append(value * options['weight'])
          total_weights += options['weight']
        elif self.aggr_type in ('EUCLID', 'PRODUCT'):
          # Distance-based types work on (1 - value); integer weights are
          # realized by repeating the component.
          for i in xrange(0, int(options['weight'])):
            results.append(1 - value)
            total_weights += 1
        else:
          results.append(value)

    debug.log_outdent(5, 'Exiting metric aggregation\n')

    # if no submetrics exist or none evaluated correctly
    if len(results) == 0:
      return None
    if self.aggr_type == 'AVG':
      return sum(results) / total_weights
    if self.aggr_type == 'EUCLID':
      distance = math.sqrt(sum([r*r for r in results])) / math.sqrt(total_weights)
      return 1 - distance
    if self.aggr_type == 'PRODUCT':
      distance = reduce(lambda x, y: x*y, results)
      return 1 - distance
    if self.aggr_type == 'MAX':
      return max(results)
    if self.aggr_type == 'MIN':
      return min(results)


class Metric(function.Function):
  """Base class for all metrics; concrete subclasses define `identifier`,
  a `param_types` mapping and an `evaluate()` method."""
  pass


class UserMetric(Metric):
  """A metric defined by the user as an aggregation of other metrics."""

  def __init__(self, identifier, param_types, aggregation):
    self.aggregation = aggregation
    self.identifier = identifier
    self.param_types = param_types

  def evaluate(self, bindings):
    """Delegate evaluation to the underlying MetricAggregation."""
    return self.aggregation.evaluate(bindings)


class MetricCall(function.FunctionCall):
  """A function call whose target is a metric."""

  def __init__(self, metric, params):
    super(MetricCall, self).__init__(metric, params)

  def call_func(self, param_values):
    """Invoke the wrapped metric.

    User-defined metrics receive the raw bindings dict; built-in metrics
    receive the bindings as keyword arguments.
    """
    if isinstance(self.func, UserMetric):
      return self.func.evaluate(param_values)
    return self.func.evaluate(**param_values)


class NumSimilarity(Metric):
  """Similarity of two numbers: the ratio of the smaller to the larger."""

  identifier = 'numSimilarity'
  param_types = {
    'num1': conv.decimal,
    'num2': conv.decimal
  }

  def evaluate(self, num1, num2):
    debug.log(7, '%s: %s <=> %s\n' % (self.identifier, num1, num2))
    # Guard the zero cases explicitly to avoid dividing by zero:
    # two zeros are identical, a single zero matches nothing.
    if num1 == 0 or num2 == 0:
      return 1 if num1 == num2 else 0
    return min(num1 / num2, num2 / num1)


class DateEquality(Metric):
  """Exact equality of two dates: 1 when equal, 0 otherwise."""

  identifier = 'dateEquality'
  param_types = {
    'date1': conv.date,
    'date2': conv.date
  }

  def evaluate(self, date1, date2):
    return 1 if date1 == date2 else 0


class DateSimilarity(Metric):
  """Linear similarity of two dates over a maximum-difference window."""

  identifier = 'dateSimilarity'
  param_types = {
    'date1': conv.date,
    'date2': conv.date,
    'max_diff': conv.integer
  }

  def evaluate(self, date1, date2, max_diff):
    """Return 1 for identical dates, falling off linearly to 0 at a
    difference of max_diff days or more.

    Fixed an integer-division bug: under Python 2,
    (max_diff - diff_days) / max_diff truncated to 0 for every
    non-identical date pair.
    """
    diff_days = abs(date1 - date2).days

    if diff_days > max_diff:
      return 0
    return float(max_diff - diff_days) / max_diff


class StringEquality(Metric):
  """Exact equality of two strings: 1 when equal, 0 otherwise."""

  identifier = 'stringEquality'
  param_types = {
    'str1': conv.string,
    'str2': conv.string
  }

  def evaluate(self, str1, str2):
    return 1 if str1 == str2 else 0


class StringSimilarity(Metric):
  """Approximate string similarity via a febrl string-comparison algorithm.

  Subclasses select the algorithm by overriding `algorithm`; this base
  defaults to 'jaro'.
  """

  identifier = 'string_similarity'
  algorithm = 'jaro'
  param_types = {
    'str1': conv.string,
    'str2': conv.string
  }

  def evaluate(self, str1, str2):
    debug.log(7, '%s: %s <=> %s\n' % (self.identifier, repr(str1), repr(str2)))
    # Short-circuit identical strings; otherwise delegate to febrl, whose
    # result tuple carries the similarity value in its first element.
    if str1 == str2:
      return 1
    return strcmp.do_stringcmp(self.algorithm, str1, str2)[0]


class JaroSimilarity(StringSimilarity):
  """String similarity using the Jaro comparison algorithm."""
  identifier = 'jaroSimilarity'
  algorithm = 'jaro'


class JaroWinklerSimilarity(StringSimilarity):
  """String similarity using the Jaro-Winkler comparison algorithm."""
  identifier = 'jaroWinklerSimilarity'
  algorithm = 'jaro-winkler'


class QGramSimilarity(StringSimilarity):
  """String similarity using the q-gram comparison algorithm."""
  identifier = 'qGramSimilarity'
  algorithm = 'qgram'


class MaxSimilarityInSets(Metric):
  """The best pairwise similarity between elements of two sets, as judged
  by a named sub-metric."""

  identifier = 'maxSimilarityInSets'
  param_types = {
    'set1': [conv.raw],
    'set2': [conv.raw],
    'submetric': conv.string
  }

  def evaluate(self, set1, set2, submetric):
    """Return the maximum sub-metric value over all element pairs, or 0
    if either set is empty.

    Raises:
      Error: if submetric names no registered metric.
    """
    sm = get(submetric)

    # Renamed the accumulator: the original shadowed the builtin max().
    best = 0
    for el1 in set1:
      for el2 in set2:
        value = sm.evaluate({'item1': [el1], 'item2': [el2]})
        if value > best:
          best = value

    return best


class SetSimilarity(Metric):
  """Similarity of two sets based on the share of matching elements."""

  identifier = 'setSimilarity'
  param_types = {
    'set1': [conv.raw],
    'set2': [conv.raw],
    'submetric': conv.string,
    'threshold': conv.decimal
  }

  def evaluate(self, set1, set2, submetric, threshold):
    """Return sqrt(same / (total - same)), where `same` counts elements
    of set1 with at least one match in set2 above threshold and `total`
    is the combined size of both sets.

    Fixes two defects in the original formula
    sqrt(1 / (total/same - 1)): Python 2 integer division truncated the
    intermediate ratio, and zero matches (including two empty sets)
    raised ZeroDivisionError.

    Raises:
      Error: if submetric names no registered metric.
    """
    total_elements = len(set1) + len(set2)
    same_elements = 0

    sm = get(submetric)

    # count elements of set1 that have a similar counterpart in set2
    for el1 in set1:
      for el2 in set2:
        if sm.evaluate({'item1': [el1], 'item2': [el2]}) > threshold:
          same_elements += 1
          # one match is enough; continue with the next element of set1
          break

    if same_elements == 0:
      return 0

    # sqrt(same / (total - same)), computed in floating point.
    # total - same >= len(set2) > 0 whenever same > 0.
    return math.sqrt(float(same_elements) / (total_elements - same_elements))


class URIEquivalence(Metric):
  """Equivalence of two URIs after light normalization (RFC 3986, 6.2)."""

  identifier = 'uriEquivalence'
  param_types = {
    'uri1': conv.string,
    'uri2': conv.string
  }

  def evaluate(self, uri1, uri2):
    # Identical strings are trivially equivalent.
    if uri1 == uri2:
      return 1

    # Apply some of RFC 3986, Section 6.2: percent-decode both URIs,
    # then compare component-wise.
    parts1 = urlparse.urlparse(urllib2.unquote(uri1))
    parts2 = urlparse.urlparse(urllib2.unquote(uri2))

    (scheme1, host1, path1, params1, query1, frag1) = parts1
    (scheme2, host2, path2, params2, query2, frag2) = parts2

    # Scheme and hostname compare case-insensitively; everything else
    # must match exactly.
    if scheme1.lower() != scheme2.lower():
      return 0
    if host1.lower() != host2.lower():
      return 0
    if (path1, params1, query1, frag1) != (path2, params2, query2, frag2):
      return 0
    return 1
    

class TaxonomicSimilarity(Metric):
  """Similarity of two concepts within an RDF taxonomy, based on
  depth-dependent milestone values (linear or exponential decay)."""

  identifier = 'taxonomicSimilarity'
  param_types = {
    'concept1': conv.string,
    'concept2': conv.string,
    'milestones': conv.string,
    'filename': conv.string
  }

  # Cache of parsed taxonomies, keyed by filename (shared across instances).
  taxonomies = {}

  class Taxonomy(object):
    """A parsed RDF taxonomy: nodes with parent links, depths and
    per-node milestone values."""

    def __init__(self, filename):
      self.nodes = {}

      g = rdflib.ConjunctiveGraph()
      g.parse(filename, format="xml")

      for uri in g.all_nodes():
        self.nodes[str(uri)] = {'uri': str(uri), 'parent': None}

      # Properties whose triples express a child-to-parent link.
      subclass_properties = [
        'http://www.w3.org/2000/01/rdf-schema#subClassOf',
        'http://www.w3.org/2006/03/wn/wn20/schema/hyponymOf',
        'http://www.w3.org/2004/02/skos/core#broader'
      ]

      for (s, p, o) in g.triples((None, None, None)):
        if str(p) in subclass_properties:
          self.nodes[str(s)]['parent'] = self.nodes[str(o)]

      for node in self.nodes.values():
        node['depth'] = self.calc_depth(node)

      self.max_depth = max([n['depth'] for n in self.nodes.values()])

      # Milestone values shrink with depth, so a deeper common ancestor
      # yields a smaller distance between its descendants.
      for node in self.nodes.values():
        node['linear_milestone'] = 1 - 0.5 * (float(node['depth']) / self.max_depth)
        k = 2 # TODO: make configurable
        node['exponential_milestone'] = 0.5 / pow(k, node['depth'])

    def calc_depth(self, node):
      """Return the number of parent links between node and its root."""
      depth = 0
      while node['parent'] is not None:
        depth += 1
        node = node['parent']

      return depth

    def distance(self, concept1, concept2, milestone_type):
      """Return the milestone distance between two concept URIs, or None
      if either concept is not part of this taxonomy.

      Raises:
        Error: for an unknown milestone type.
      """
      if milestone_type not in ['linear', 'exponential']:
        raise Error('Unknown milestone type: "%s"' % milestone_type)

      # Fixed: only a missing concept should yield None; the original
      # bare `except:` also swallowed unrelated errors.
      try:
        node1 = self.nodes[concept1]
        node2 = self.nodes[concept2]
      except KeyError:
        return None

      ms_key = '%s_milestone' % (milestone_type)
      ccp = self.closest_common_parent(node1, node2)
      return (ccp[ms_key] - node1[ms_key]) + (ccp[ms_key] - node2[ms_key])

    def closest_common_parent(self, node1, node2):
      """Return the deepest node that is an ancestor of both arguments."""
      # go up to same depth in tree
      while node1['depth'] > node2['depth']:
        node1 = node1['parent']
      while node1['depth'] < node2['depth']:
        node2 = node2['parent']

      # walk up the tree until we meet at the closest common parent
      while node1 != node2:
        node1 = node1['parent']
        node2 = node2['parent']

      return node1

  def evaluate(self, concept1, concept2, milestones, filename):
    """Return 1 - milestone distance, or None if a concept is unknown.

    Taxonomies are parsed once per filename and cached.
    """
    if not filename in self.taxonomies:
      self.taxonomies[filename] = self.Taxonomy(filename)

    dist = self.taxonomies[filename].distance(concept1, concept2, milestones)

    if dist is None:
      return None

    return 1 - dist


class IndexLookup(Metric):
  """Looks up `result` among the top hits of a Xapian full-text search."""

  identifier = 'indexLookup'
  param_types = {
    'search': conv.string,
    'result': conv.string,
    'index_dir': conv.string,
    'limit': conv.integer
  }

  # Open Xapian databases, keyed by index directory (shared cache).
  indexes = {}

  # Cached hit lists, keyed by hash of (search, index_dir, limit).
  results = {}

  def evaluate(self, search, result, index_dir, limit):
    """Return a score in (0.5, 1] if result appears among the top `limit`
    hits for `search` (better rank scores higher), else 0."""
    hash_key = hash((search, index_dir, limit))

    if hash_key not in self.results:
      # Keep the result cache bounded.
      if len(self.results) > 1000:
        self.results.clear()
      self.results[hash_key] = self._run_query(search, index_dir, limit)

    for hit in self.results[hash_key]:
      if hit.document.get_data() == result:
        return 1 - (float(hit.get_rank()) / limit) / 2

    return 0

  def _run_query(self, search, index_dir, limit):
    """Execute the (possibly spelling-corrected) query and return its hits."""
    database = self.get_index(index_dir)
    enquire = xapian.Enquire(database)

    qp = xapian.QueryParser()
    qp.set_database(database)
    query = qp.parse_query(search, qp.FLAG_SPELLING_CORRECTION)
    corrected = qp.get_corrected_query_string()
    if corrected:
      query = qp.parse_query(corrected)

    enquire.set_query(query)
    return [hit for hit in enquire.get_mset(0, limit)]

  def get_index(self, index_dir):
    """Return a cached xapian.Database for index_dir, opening on demand."""
    if index_dir not in self.indexes:
      self.indexes[index_dir] = xapian.Database(index_dir)

    return self.indexes[index_dir]

# Register all built-in metrics at import time.
# NOTE(review): StringSimilarity (the base class) and URIEquivalence are
# defined above but not registered here -- confirm whether URIEquivalence
# was omitted intentionally.
register(
  NumSimilarity(),
  StringEquality(),
  DateEquality(),
  DateSimilarity(),
  JaroSimilarity(),
  JaroWinklerSimilarity(),
  QGramSimilarity(),
  MaxSimilarityInSets(),
  SetSimilarity(),
  TaxonomicSimilarity(),
  IndexLookup()
)
