#!/usr/bin/python
#
# Copyright (c) 2009 Julius Volz
# See LICENSE for details.

import os
import rdflib
import httplib
from rdflib.Graph import ConjunctiveGraph as Graph
import SPARQLWrapper as sw
import urllib2

import cPickle, gzip, time
import config, debug, silk

class Error(silk.Error):
  """Raised when setting up or querying a datasource fails."""


class DataSource(object):
  """A remote SPARQL endpoint with optional on-disk result caching,
  request pacing and retry-on-connection-error behaviour."""

  def __init__(self, id, endpoint_uri, graph, do_cache, page_size, pause,
               retry_count, retry_pause):
    """Args:
      id: symbolic name of this datasource (used in logs and cache keys).
      endpoint_uri: URI of the SPARQL endpoint to query.
      graph: named graph to restrict queries to, or None.
      do_cache: if true, cache query results on disk between runs.
      page_size: page size (LIMIT) used by callers when paging results.
      pause: minimum delay between queries, in milliseconds.
      retry_count: how often to retry after a connection error.
      retry_pause: delay before each retry, in milliseconds.
    """
    self.id = id
    self.endpoint_uri = endpoint_uri
    self.graph = graph
    self.sparql = sw.SPARQLWrapper(endpoint_uri)
    self.do_cache = do_cache
    self.page_size = page_size
    # Pause values are configured in milliseconds; convert to seconds
    # for time.sleep().
    self.pause_time = float(pause) / 1000
    self.last_query_time = None
    self.retry_count = retry_count
    self.retry_pause = float(retry_pause) / 1000

    if self.do_cache:
      self.cache = self.read_cache_from_disk()

  def query(self, query, no_cache=False, retries=None):
    """Executes a SPARQL query and returns the 'results' part of the
    JSON response.  Serves and fills the cache unless no_cache is set,
    and retries on connection errors.

    Raises:
      Error: on a malformed query, or when all retries are exhausted.
    """
    debug.log(6, 'Executing SPARQL query "%s"\n' % query)
    # Compute the cache key once instead of re-hashing the query string
    # on every lookup/store.
    cache_key = hash(query)
    if self.do_cache and not no_cache and cache_key in self.cache:
      return self.cache[cache_key]

    self.sparql.setQuery(query)
    self.sparql.setReturnFormat(sw.JSON)

    if self.pause_time:
      self.pause()

    try:
      results = self.sparql.query().convert()
    except sw.SPARQLExceptions.QueryBadFormed:
      raise Error('\nBad SPARQL request:\n%s' % (query))
    except (urllib2.URLError, httplib.BadStatusLine) as e:
      debug.log(1, 'Connection error during SPARQL request to "%s": %s\n' %
                (self.endpoint_uri, e))
      if retries is None:
        retries = self.retry_count
      if retries > 0:
        debug.log(1, 'Retrying in %ss (retries left: %s)...' % (self.retry_pause,
                                                                retries))
        time.sleep(self.retry_pause)
        return self.query(query, no_cache, retries - 1)
      else:
        raise Error('Connection retry count not configured or exceeded, '
                    'aborting on error: %s\n' % e)

    if self.pause_time:
      self.last_query_time = time.time()

    if self.do_cache and not no_cache:
      self.cache[cache_key] = results['results']

    return results['results']

  def hash_key(self):
    """Returns a hash identifying this datasource, used in cache file names."""
    return hash((self.id, self.endpoint_uri, self.graph or ''))

  def pause(self):
    """Sleeps so that at least pause_time seconds elapse between queries."""
    if self.last_query_time is None:
      return

    pause_time = self.pause_time + self.last_query_time - time.time()
    if pause_time > 0:
      time.sleep(pause_time)

  def cache_file(self):
    """Returns the path of the on-disk query cache for this datasource."""
    return '%s%squery_%s' % (config.cache_dir, os.sep, self.hash_key())

  def write_cache_to_disk(self):
    """Persists the in-memory query cache, atomically via a temp file."""
    debug.log(2, 'Writing SPARQL query cache for "%s" to disk...' %
              self.id)
    # Write to a temp file and rename, so a crash mid-write never leaves
    # a truncated cache behind.  Close the handle even if pickling fails.
    f = gzip.open(self.cache_file() + '.tmp', 'wb')
    try:
      cPickle.dump(self.cache, f)
    finally:
      f.close()
    os.rename(self.cache_file() + '.tmp', self.cache_file())
    debug.log(2, 'done.\n')

  def read_cache_from_disk(self):
    """Loads the query cache from disk; returns {} if there is none."""
    try:
      f = gzip.open(self.cache_file(), 'rb')
    except (IOError, OSError):
      # No (readable) cache file yet - start with an empty cache.  The
      # former bare "except:" also hid unrelated bugs; keep the catch
      # narrow so those surface.
      return {}

    debug.log(2, 'Reading SPARQL query cache for "%s" from disk...' %
              self.id)
    try:
      cache = cPickle.load(f)
    finally:
      f.close()
    debug.log(2, 'done.\n')
    return cache


class CrawlerDataset(object):
  """A dataset built by crawling Linked Data: fetches the seed resource
  (and, with depth > 0, resources referenced from it) into a local
  rdflib graph that can then be queried with SPARQL."""

  def __init__(self, resource, var):
    """Args:
      resource: URI of the seed resource to crawl.
      var: SPARQL variable name used for result bindings.
    """
    self.resource = resource
    self.graph = Graph()
    self.var = var

    # URIs that have already been fetched into self.graph.
    self.crawled_resources = []
    self.crawl(resources=[resource], depth=0)

  def crawl(self, resources, depth):
    """Parses each resource URI into self.graph; if depth > 0, recurses
    into all URI objects found in the graph that were not crawled yet."""
    for resource in resources:
      try:
        if resource.find('#') != -1:
          # For hash URIs, fetch the containing document instead.
          # NOTE(review): appending '.rdf' is a heuristic that assumes a
          # particular publishing convention - confirm it holds for the
          # sources being crawled.
          uri = resource.split('#')[0] + '.rdf'
          self.graph.parse(uri)
        else:
          self.graph.parse(resource)
      except urllib2.HTTPError:
        # Best-effort crawl: silently skip resources that cannot be
        # fetched.
        pass

      self.crawled_resources.append(resource)

    if depth > 0:
      # Snapshot the crawled list as a set so the membership test below
      # is O(1) instead of scanning the list once per graph object.
      crawled = set(self.crawled_resources)
      resources = [str(x) for x in self.graph.objects()
                   if type(x) == rdflib.URIRef and
                   x not in crawled]
      self.crawl(resources, depth - 1)

  def query(self, query):
    """Runs a SPARQL query against the crawled graph and returns the
    first-variable bindings in SPARQL-JSON-like form:
    {'bindings': [{var: {'value': ..., 'type': ...}}, ...]}."""
    raw_results = self.graph.query(query, initNs=dict())
    results = []
    for raw_result in raw_results:
      result = { 'value': str(raw_result[0]) }
      # Exact type checks on purpose: rdflib term classes share a common
      # string base, and only these two kinds get a 'type' entry.
      if type(raw_result[0]) is rdflib.URIRef:
        result['type'] = 'uri'
      elif type(raw_result[0]) is rdflib.Literal:
        # TODO: preserve language and xsd datatype
        result['type'] = 'literal'
      results.append({self.var: result})

    return {'bindings': results}

  def resources(self):
    """Returns the list of seed resources (just the one crawled URI)."""
    return [self.resource]

  def get_graph(self):
    """Named-graph restriction; always None for a crawled dataset."""
    return None

class Dataset(object):
  """One side of a link-discovery run: a datasource plus the SPARQL
  variable and optional restriction pattern that select its resources."""

  def __init__(self, datasource, var, restrict=None):
    """Args:
      datasource: the DataSource to query.
      var: SPARQL variable name identifying the resources.
      restrict: optional extra graph pattern limiting the resources.
    """
    self.datasource = datasource
    self.var = var
    self.restrict = restrict

  def resources(self):
    """Returns the list of resource URIs in this dataset, either from
    the on-disk cache or by paging through a SELECT DISTINCT query."""
    resources = self.resources_from_cache()
    if resources is not None:
      debug.log(2, 'Using cache...')
      return resources

    debug.log(2, 'No resource list cache (%s) found, using query...' %
              self.hash_key())

    limit = self.datasource.page_size
    offset = 0

    prefixes = ''
    for prefix in config.prefixes:
      prefixes += 'PREFIX %s: <%s>\n' % (prefix, config.prefixes[prefix])

    resources = []

    graph_clause = ''
    if self.datasource.graph is not None:
      graph_clause = 'FROM <%s>' % self.datasource.graph

    # Page through the endpoint with LIMIT/OFFSET until an empty page
    # comes back.  NOTE(review): without an ORDER BY this relies on the
    # endpoint returning a stable ordering across requests - confirm the
    # endpoints in use guarantee that.
    while True:
      results = self.datasource.query("""
        %s
        SELECT DISTINCT ?%s
        %s
        WHERE {
          ?%s ?%s_p ?%s_o .
          %s
        } LIMIT %s OFFSET %s
      """ % (prefixes, self.var, graph_clause, self.var, self.var, self.var,
             self.restrict or "", limit, offset), no_cache=True)

      if results['bindings']:
        debug.log(2, '[%s]' % (offset + len(results['bindings'])))

      if len(results['bindings']) == 0:
        break
      offset += limit
      resources += [b[self.var]['value'] for b in results['bindings']]

    self.resources_to_cache(resources)
    return resources

  def hash_key(self):
    """Returns a hash identifying this dataset, used in cache file names."""
    return hash((self.datasource.hash_key(), self.restrict or ''))

  def cache_file(self):
    """Returns the path of the on-disk resource-list cache."""
    return '%s%sresources_%s' % (config.cache_dir, os.sep, self.hash_key())

  def resources_from_cache(self):
    """Returns the cached resource list, or None if no cache file exists."""
    try:
      # "with" closes the file even if reading fails part-way.
      with open(self.cache_file(), 'r') as cache:
        return [rsc.strip() for rsc in cache]
    except IOError:
      return None

  def resources_to_cache(self, resources):
    """Writes the resource list to the cache file, one URI per line."""
    # "with" guarantees the handle is closed even if a write fails.
    with open(self.cache_file(), 'w') as cache:
      for rsc in resources:
        cache.write('%s\n' % rsc)

  def query(self, query):
    """Delegates a SPARQL query to the underlying datasource."""
    return self.datasource.query(query)

  def get_graph(self):
    """Returns the named graph of the underlying datasource (may be None)."""
    return self.datasource.graph