#!/usr/bin/env python

# (c) 2007-2011 Helsinki University of Technology and University of Helsinki
# Licensed under the MIT license; see LICENSE.txt for more information.

from urlfetch import URLFetcher
from store import Store
from imp import Importer
from map import Mapper
from validate import Validator
from filter import Filter
from content import ContentExtractor
from report import Report
from crawl import Crawler

from threading import Thread, Event
import time
import sys

class Session:
  """Coordinates one harvesting run for a single source.

  Wires up the worker pipeline (crawl -> import -> map -> validate ->
  filter -> content -> store -> report), seeds the crawler with the
  source's URLs (or one explicit URL), drives documents through the
  stages via schedule(), and shuts the stages down in order when done.
  """

  def __init__(self, config, lookup, model, source, limit=0, report=2, keep=False, cached=False, progress=False, url='', verbose=False, notes=False):
    if verbose:
      print >>sys.stderr, "Creating session for source", source.getName()
    self._config = config
    self._lookup = lookup
    self._model = model
    self._source = source
    self._limit = limit
    self._keep = keep
    self._cached = cached
    self._progress = progress
    self._verbose = verbose
    self._url = url
    self._notes = notes

    # Set by a worker on an unrecoverable error; checked at the end of run().
    self._fatalevent = Event()

    self._fetcher = URLFetcher(self)
    self._crawler = Crawler(self)
    self._importer = Importer(self)
    self._mapper = Mapper(self)
    self._validator = Validator(self)
    self._filter = Filter(self)
    self._content = ContentExtractor(self)
    self._store = Store(self)
    # A single explicit URL gets a per-URL ("single") report.
    self._report = Report(self, report, single=(url != ''))

    # Per-document pipeline stages, in processing order.  The stage name
    # matches the keys recorded in doc.getCompleted() -- see schedule().
    self._pipeline = [
      ('import', self._importer),
      ('map', self._mapper),
      ('validate', self._validator),
      ('filter', self._filter),
      ('content', self._content),
      ('store', self._store),
    ]
    # Every worker in shutdown/reporting order: crawler first, report last.
    self._workers = [self._crawler] \
      + [worker for (stage, worker) in self._pipeline] \
      + [self._report]

  def getConfig(self):
    return self._config

  def getStore(self):
    # NOTE(review): this method used to be defined twice; a later duplicate
    # (apparently a copy-paste of getRecurse) returned "not self._url" and
    # silently shadowed this definition.  The duplicate has been removed so
    # getStore() returns the Store worker, as the name says.
    return self._store

  def getSource(self):
    return self._source

  def getLookupService(self):
    return self._lookup

  def getModelService(self):
    return self._model

  def getFetcher(self):
    return self._fetcher

  def getReport(self):
    return self._report

  def getLimit(self):
    return self._limit

  def getKeep(self):
    return self._keep

  def getCached(self):
    return self._cached

  def getRecurse(self):
    # Recurse during crawling only when no single URL was given.
    return not self._url

  def getDelete(self):
    # Deleting old documents is only safe on a full, unlimited run.
    return (not self._limit and not self._url)

  def getUrl(self):
    return self._url

  def getVerbose(self):
    return self._verbose

  def getNotes(self):
    return self._notes

  def getFatalEvent(self):
    return self._fatalevent

  def schedule(self, doc):
    """Pass doc to the first pipeline stage it has not yet completed.

    A document that has completed every stage is simply dropped.
    """
    completed = doc.getCompleted()
    for stage, worker in self._pipeline:
      if stage not in completed:
        worker.addWork(doc)
        return

  def start(self):
    """Run the session in a background thread; see close() to wait for it."""
    if self._verbose:
      print >>sys.stderr, "Starting session for source", self._source.getName()
    self._thread = Thread(group=None, target=self.run, name='Session')
    self._thread.start()

  def run(self):
    """Thread body: seed the crawler, then shut each stage down in order.

    Each shutdown() call is assumed to block until that stage has drained,
    so iterating the workers in pipeline order drains the whole pipeline.
    """
    self._running = True
    if self._progress:
      self._progressThread = Thread(group=None, target=self.runProgress, name='SessionReport')
      self._progressThread.setDaemon(True)
      self._progressThread.start()

    # RDF-type sources ignore an explicit URL and always use their
    # configured URL list.
    if self._url and self._source.getType() not in ('rdf', 'rdftweaks'):
      # single URL to process
      self._crawler.addWork(self._url)
    else: # process all URLs given in source configuration
      for url in self._source.getUrls():
        self._crawler.addWork(url)

    # Shut down every worker in pipeline order (crawler ... report).
    # (An unused "threads_to_shutdown" list used to be built here; it is
    # now replaced by the shared self._workers list actually being used.)
    for worker in self._workers:
      worker.shutdown()

    self._running = False
    if self._progress:
      self._progressThread.join()
      self.printProgress()	# print final progress report

    if self._fatalevent.isSet():
      print >>sys.stderr, "Session %s: FATAL ERROR, exiting." % self._source.getName()

  def printProgress(self):
    """Print a per-stage queue/done snapshot to stderr."""
    print >>sys.stderr, "-" * 80
    print >>sys.stderr, time.strftime('%Y-%m-%d %H:%M:%S'), "Session for source", self._source.getName()
    print >>sys.stderr, "\tphase:\tcrawl\timport\tmap\tvalidat\tfilter\tcontent\tstore\treport"
    print >>sys.stderr, "\tqueue:\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d" % \
      tuple(worker.getWorkWaiting() for worker in self._workers)
    print >>sys.stderr, "\tdone:\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d" % \
      tuple(worker.getWorkDone() for worker in self._workers)
    print >>sys.stderr, "First 10 URLs in crawl queue:"
    print >>sys.stderr, "\n".join(self._crawler.getFirstURLs())

  def runProgress(self):
    """Daemon-thread body: report progress every 5 seconds while running."""
    while self._running:
      self.printProgress()
      time.sleep(5)

  def close(self):
    """Wait for the session thread started by start() to finish."""
    self._thread.join()
    if self._verbose:
      print >>sys.stderr, "Finished session for source", self._source.getName()
