import os, sys
import subprocess
import datetime
import time
import thread
import time

from django.db import models
from django.utils.encoding import smart_unicode

from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType

from django.conf import settings

import logging

# Attach a stderr handler at INFO level to the root logger so daemon
# output is visible when running outside a configured logging setup.
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger().setLevel(logging.INFO)


# (db value, human-readable label) pairs used as choices for Job.status.
# They also describe the job life cycle: defined -> scheduled -> running
# -> {successful, unsuccessful, cancelled (via requested_cancel)}.
# NOTE(review): Worker._mark_finished can still set "timedout" even though
# it is commented out here -- confirm whether that state is intended.
STATUS_TABLE = [('defined', 'ready to run'),
                ('scheduled', 'scheduled'),
                ('running', 'in progress',),
                ('requested_cancel', 'cancellation requested'),
                ('cancelled', 'cancelled'),
                ('successful', 'finished successfully'),
                ('unsuccessful', 'finished with error'),
                #('timedout', 'timed out'),
                ]

# Module-level logger for the job system.
LOG = logging.getLogger("damnjobs")


class JobQuerySet(models.query.QuerySet):
  """QuerySet that accepts a ``model`` keyword in get()/filter() and
  translates it into the corresponding ``content_type`` lookup."""

  def _resolve_model_kwarg(self, kwargs):
    # Translate model=SomeModel into content_type=<ContentType>, in place.
    if 'model' in kwargs:
      kwargs['content_type'] = ContentType.objects.get_for_model(kwargs.pop('model'))
    return kwargs

  def get(self, *args, **kwargs):
    # Debug trace was a bare ``print``; route it through the logger instead.
    LOG.debug('JobQuerySet GET %s %s', args, kwargs)
    return super(JobQuerySet, self).get(*args, **self._resolve_model_kwarg(kwargs))

  def filter(self, *args, **kwargs):
    LOG.debug('JobQuerySet FILTER %s %s', args, kwargs)
    return super(JobQuerySet, self).filter(*args, **self._resolve_model_kwarg(kwargs))

class JobManager(models.Manager):
  """Manager for Job: model-aware lookups, job creation, and the
  scheduler main loop that drives job execution."""

  def get_query_set(self):
    # Use the custom queryset so get()/filter() accept a ``model`` keyword.
    return JobQuerySet(self.model)

  def get(self, *args, **kwargs):
    return self.get_query_set().get(*args, **kwargs)

  def filter(self, *args, **kwargs):
    return self.get_query_set().filter(*args, **kwargs)

  def _exec_task(self, job_id):
    """Invoke the target method of the job with primary key ``job_id``.

    Always flushes stdio so the parent process capturing our output
    sees everything, even when the method raises.
    """
    try:
      job = self.get(pk=job_id)
      the_method = getattr(job.content_object, job.method)
      the_method()
    finally:
      # sys is already imported at module level; no local import needed.
      sys.stdout.flush()
      sys.stderr.flush()

  def create_job(self, object, method, status_in=None):
    """Return a job for (object, method) whose status is in ``status_in``
    (default: any status), creating a fresh one if none matches.

    Raises Exception when ``object`` has no attribute named ``method``.
    """
    if not getattr(object, method, None):
      raise Exception("%s does not have a method %s" % (object.__class__.__name__, method))
    if not status_in:
      status_in = dict(STATUS_TABLE).keys()
    content_type = ContentType.objects.get_for_model(object)
    LOG.debug('create_job type=%s object=%s', content_type, object)
    # ``status__in`` only constrains the lookup: get_or_create drops
    # double-underscore lookup kwargs when building the new row.
    job, created = self.get_or_create(content_type=content_type,
                                      object_id=object.id,
                                      method=method,
                                      status__in=status_in)
    return job

  def _run(self, nrOfWorkers):
    """Scheduler main loop; never returns.

    Each second: cancel jobs whose cancellation was requested, purge
    day-old successful jobs (outside DEBUG, at most once a minute),
    reap finished worker threads, and start scheduled jobs while fewer
    than ``nrOfWorkers`` workers are running.
    """
    LOG.info("Scheduler started")
    last_cleanup = time.time()
    workers = []
    while True:
      time.sleep(1)
      try:
        # First cancel any job that needs to be cancelled...
        for job in self.filter(status="requested_cancel"):
          LOG.info("Cancelling job %d...", job.pk)
          job._cancel()
          LOG.info("...job %d cancelled.", job.pk)

        if not settings.DEBUG and (time.time() - last_cleanup > 60): # Check every 60 seconds
          last_cleanup = time.time()
          LOG.info("Cleaning finished jobs...")
          yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
          for job in self.filter(status="successful").exclude(end_date__gte=yesterday):
            LOG.info("Deleting job %d...", job.pk)
            job.delete()
            LOG.info("...job %d deleted.", job.pk)
          LOG.info("...Cleaned finished jobs.")

        # Check for finished workers. Iterate over a copy: removing from
        # the list while enumerating it would skip the following worker.
        for i, w in enumerate(list(workers)):
          if w.is_alive():
            w.join(1)
          if not w.is_alive():
            workers.remove(w)
            LOG.info("Removed (worker %s)...", i)

        # Do our chores: launch scheduled jobs while there is capacity.
        jobs = self.filter(status="scheduled")
        while len(workers) < nrOfWorkers and jobs.count() > 0:
          job = jobs[0]
          LOG.info("Starting job %s (worker %s)...", job.pk, len(workers))
          w = job._run()
          workers.append(w)
          w.start()
          LOG.info("...job %s (worker %s) started.", job.pk, len(workers)-1)
          jobs = self.filter(status="scheduled").exclude(id=job.id)

      except Exception:
        # A bare ``except:`` would also swallow SystemExit and
        # KeyboardInterrupt and make the daemon unkillable; catch only
        # real errors and keep the loop alive.
        LOG.exception("Scheduler exception")


          
class Job(models.Model):
  """A deferred call of ``method`` on an arbitrary model instance,
  executed in a 'runjob' subprocess and tracked through the
  STATUS_TABLE states by the JobManager scheduler."""

  objects = JobManager()

  # Generic foreign key to the object whose method will be run.
  content_type = models.ForeignKey(ContentType)
  object_id = models.CharField(max_length=40, db_index=True)
  content_object = generic.GenericForeignKey()

  # Name of the method to invoke on content_object.
  method = models.CharField(max_length=200)

  # OS pid of the worker subprocess while the job is running.
  pid = models.IntegerField(null=True, blank=True)

  start_date = models.DateTimeField(null=True, blank=True)
  end_date = models.DateTimeField(null=True, blank=True)

  status = models.CharField(max_length=200,
                            default="defined",
                            choices=STATUS_TABLE,
                            )
  description = models.CharField(max_length=100, default='', null=True, blank=True)
  stdout = models.TextField(default='', null=True, blank=True)
  stderr = models.TextField(default='', null=True, blank=True)

  def _append_log(self, field, log):
    """Append ``log`` to the text column ``field`` ('stdout' or 'stderr').

    Raises Exception if the job row no longer exists.
    """
    # not possible to make it completely atomic in Django, it seems
    current = getattr(Job.objects.get(pk=self.pk), field)
    rowcount = Job.objects.filter(pk=self.pk).update(**{field: current + log})
    if rowcount == 0:
      # The original used a bare ``pk`` here, which raised NameError on
      # this error path instead of the intended message.
      raise Exception(("Failed to save %s for job %d, job does not exist; %s was:\n"
                       % (field, self.pk, field)) + log)

  def append_stdout(self, log):
    if log:
      self._append_log('stdout', log)

  def append_stderr(self, log):
    if log:
      self._append_log('stderr', log)

  def _compute_duration(self):
    """Human-readable run time such as '1 hour, 2 minutes'; None unless
    both start_date and end_date are set."""
    if self.start_date and self.end_date:
      delta = self.end_date - self.start_date
      # Avoid shadowing the builtins ``min`` and ``str``.
      minutes, seconds = divmod((delta.days * 86400) + delta.seconds, 60)
      hours, minutes = divmod(minutes, 60)
      parts = ((hours, 'hour'), (minutes, 'minute'), (seconds, 'second'))
      return ', '.join(['%d %s%s' % (value, unit, 's' if value > 1 else '')
                        for value, unit in parts if value > 0])

  duration = property(_compute_duration)

  def schedule(self):
    # NOTE(review): only mutates the in-memory instance; the caller must
    # save() for the scheduler to pick the job up -- confirm intended.
    self.status = 'scheduled'

  def _set_status(self, new_status, existing_status):
    """Atomically move to ``new_status`` if the current status is (one of)
    ``existing_status``; return True if a row was updated."""
    if isinstance(existing_status, str):
      existing_status = [existing_status]

    if existing_status:
      rowcount = Job.objects.filter(pk=self.pk).filter(status__in=existing_status).update(status=new_status)
    else:
      rowcount = Job.objects.filter(pk=self.pk).update(status=new_status)
    return rowcount != 0

  def _mark_start(self, pid):
    # Set the start information in all cases: That way, if it has been set
    # to "requested_cancel" already, it will be cancelled at the next loop of the scheduler
    rowcount = Job.objects.filter(pk=self.pk).update(pid=pid, start_date=datetime.datetime.now())
    if rowcount == 0:
      # Bare ``pk`` previously raised NameError on this error path.
      raise Exception("Failed to mark job with ID %d as started, job does not exist" % self.pk)

  def _mark_finished(self, new_status, existing_status):
    """Record the end date and final status; tolerate losing the race to
    another thread that already finished the job."""
    rowcount = Job.objects.filter(pk=self.pk).filter(status=existing_status).update(status=new_status, end_date=datetime.datetime.now())
    if rowcount == 0:
      LOG.warning('Failed to mark job as finished, from status "%s" to "%s" for job %s. May have been finished in a different thread already.',
                    existing_status, new_status, self.pk)
    else:
      LOG.info('Job %s finished with status "%s"', self.pk, new_status)

  def _can_run(self):
    return self.status in ["defined",]

  def _run(self):
    """Build (but do not start) a worker thread that executes this job in
    a 'runjob' subprocess and records its outcome.

    Raises Exception if the job is not in the 'scheduled' state.
    """
    if self.status != "scheduled":
      raise Exception("Job not scheduled, cannot run!")

    # Clear logs and dates left over from any previous run.
    Job.objects.filter(pk=self.pk).update(stderr='', stdout='', start_date=None, end_date=None)

    import threading

    class Worker(threading.Thread):
      def __init__(self, job):
        threading.Thread.__init__(self)
        self.job = job
        self.timed_out = False

      def run(self):
        returncode = -1
        try:
          # Bail out if another thread claimed the job first.
          if not self.job._set_status("running", "scheduled"): return

          from jobs import Execute, Log
          proc = Execute('runjob', self.job.id, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
          self.job._mark_start(proc.pid)

          # Stream the subprocess output into the job record.
          Log(proc, self.job.append_stdout, self.job.append_stderr)

          returncode = proc.returncode
          self.job.append_stdout('\n\nreturncode: '+str(returncode))
        except Exception as e:
          LOG.exception("Exception in calling thread for job %s", self.job.pk)
          try:
            import traceback
            stack = traceback.format_exc()
            self.job.append_stderr("Exception in calling thread: " + str(e) + "\n" + stack)
          except Exception:
            LOG.exception("Second exception while trying to save the first exception to the log for job %s!", self.job.pk)

        if self.timed_out:
          self.job._mark_finished("timedout", "running")
        else:
          self.job._mark_finished("successful" if returncode == 0 else "unsuccessful", "running")

    return Worker(self)

  def _cancel(self):
    """SIGTERM the running subprocess (if any); the job is marked as
    cancelled in all cases, even when the kill fails.

    Raises Exception unless the job is in 'requested_cancel' state, or
    when the kill itself fails (e.g. the process just exited).
    """
    if self.status != "requested_cancel":
      raise Exception("Cannot cancel job if not requested")

    try:
      if not self.pid: return
      import signal
      os.kill(self.pid, signal.SIGTERM)
    except OSError as e:
      # could happen if the process *just finished*. Fail cleanly
      raise Exception('Failed to cancel job model=%s, method=%s, object=%s: %s' % (self.model, self.method, self.object_id, str(e)))
    finally:
      self._mark_finished("cancelled", "requested_cancel")

  def __unicode__(self):
    return 'Job: %s - %s (%s)  [%s]'%(self.model, self.method, self.status, self.content_object)

  @property
  def model(self):
    # Display name of the target model, via its ContentType.
    return str(self.content_type)

Daemon_TYPES = [('jobdaemon', 'JobDaemon'),('wsdaemon', 'WSDaemon'),]

class Daemon(models.Model):
  """A controllable daemon process, identified by its OS pid and launch
  command; subclass hooks daemonize() and run() do the actual work."""

  # OS pid of the running daemon; None when stopped.
  pid = models.IntegerField(null=True, blank=True)
  command = models.CharField(max_length=200, choices=Daemon_TYPES,)
  # Number of concurrent job workers the daemon should run.
  workers = models.IntegerField(default=1)

  stdout = models.TextField(default='', null=True, blank=True)
  stderr = models.TextField(default='', null=True, blank=True)

  def append_stdout(self, log):
    """Append ``log`` to the persisted stdout buffer; raises if the
    daemon row no longer exists."""
    if log:
      # not possible to make it completely atomic in Django, it seems
      rowcount = Daemon.objects.filter(pk=self.pk).update(stdout=(Daemon.objects.get(pk=self.pk).stdout + log))
      if rowcount == 0:
        # Bare ``pk`` previously raised NameError on this error path.
        raise Exception(("Failed to save stdout for Daemon %d, Daemon does not exist; stdout was:\n" % self.pk) + log)

  def append_stderr(self, log):
    """Append ``log`` to the persisted stderr buffer; raises if the
    daemon row no longer exists."""
    if log:
      # not possible to make it completely atomic in Django, it seems
      rowcount = Daemon.objects.filter(pk=self.pk).update(stderr=(Daemon.objects.get(pk=self.pk).stderr + log))
      if rowcount == 0:
        raise Exception(("Failed to save stderr for Daemon %d, Daemon does not exist; stderr was:\n" % self.pk) + log)

  def _setpid(self):
    # Record our own pid so other processes can signal/inspect us.
    self.pid = os.getpid()
    self.save()

  def _resetpid(self):
    self.pid = None
    self.save()

  def _status(self):
    """'Running' if a process with our pid exists, else 'Stopped'.

    os.getsid raises OSError for a dead pid and TypeError for None.
    """
    try:
      os.getsid(self.pid)
      return 'Running'
    except (OSError, TypeError):
      return 'Stopped'
  status = property(_status)

  def _info(self):
    """Resource-usage summary of the current process, or 'Unknown' when
    the daemon is not running."""
    try:
      os.getsid(self.pid)
      import resource
      r = resource.getrusage(resource.RUSAGE_SELF)
      return 'time user mode: %s | time system mode: %s | memory: %s KB'%(r.ru_utime, r.ru_stime, (r.ru_maxrss*resource.getpagesize())/1024)
    except (OSError, TypeError):
      return 'Unknown'
  info = property(_info)

  def start(self):
    """Daemonize and run, unless a live process already owns our pid."""
    if self.pid:
      try:
        os.getsid(self.pid)
        sys.stderr.write("Daemon already running.\n")
        return
      except OSError:
        # Stale pid: the daemon died without clearing it; start anyway.
        sys.stderr.write("pid %d set, but daemon is not running. \n" % self.pid)

    self.daemonize()
    self.run()

  def stop(self):
    """Send SIGTERM until the process disappears, then clear the pid."""
    from signal import SIGTERM
    if not self.pid:
      sys.stderr.write("pid %s does not exist, cannot stop daemon.\n" % self.pid)
      return # not an error in a restart
    try:
      while 1:
        os.kill(self.pid, SIGTERM)
        time.sleep(0.1)
    except OSError as err:
      err = str(err)
      if err.find("No such process") > 0:
        # Process is gone -- the normal termination path.
        self._resetpid()
      else:
        print(str(err))
        sys.exit(1)

  def restart(self):
    # Clear logs. NOTE(review): the cleared fields are only persisted if
    # something later calls save() -- confirm this is intended.
    self.stdout = ''
    self.stderr = ''
    self.stop()
    self.start()

  def daemonize(self):
    # Subclasses must detach the process (fork/setsid) here.
    raise Exception("unimplemented daemonize")

  def run(self):
    # Subclasses implement the daemon main loop here.
    raise Exception("unimplemented run")


