# sysinfo is periodically collected and sent to the server (indirectly, for security and to avoid installing pymongo on a factotum)
# also reports on waiting, executing, completed and failed tasks,
# thus building the factotum's classad; matching tasks are then received from the server

#!/usr/bin/python
import sys, os, time
def addpath(s):
    """Prepend directory *s* to sys.path, but only if it actually exists."""
    if os.path.exists(s):
        sys.path.insert(0, s)
addpath('/ddb/lib')
#################

# Hostname of the MongoDB server holding the 'tito' database.
MONGO_SERVER = 'h1'
#MONGO_SERVER = 'localhost'

# Working directory for spawned tasks when the task document gives none
# (None -> the spawned process inherits this process's cwd).
DEFAULT_DIR = None

# Seconds between sysinfo snapshots, and between polls for new/lost tasks.
PERIOD_SEND_SYSINFO   = 3*60
PERIOD_NEW_TASK_CHECK = 5

# Restart this script automatically whenever its source files change on disk.
import paste.reloader as reloader
reloader.install()


from sysinfo import collect_sysinfo, system, MACHINE_ID

from asyncproc import Process
from select import select
import signal
import pymongo
from pymongo.connection import Connection
from pymongo.objectid import ObjectId


DB = Connection(MONGO_SERVER, 27017).tito

#DB.drop_collection("monitor")
#DB.drop_collection("progress")
try:
    assert DB.progress.options() == {}
#    assert DB.progress.options()['capped'] == True
#    assert DB.progress.options()['size'] == 100*1024*1024
#    assert DB.progress.options()['max'] == 1000000
    print 'progress.count', DB.progress.count()
except:
    print 'drop progress', DB.progress.options()
    DB.drop_collection("progress")
    DB.create_collection("progress", {})
#    DB.create_collection("progress", {'capped': True, 'max':1000000, 'size':100*1024*1024})

try:
    assert DB.monitor.options()['capped'] == True
    assert DB.monitor.options()['size'] == 100*1024*1024
    assert DB.monitor.options()['max'] == 1000000
    print 'monitor.count', DB.monitor.count()
except:
    print 'drop monitor', DB.monitor.options()
    DB.drop_collection("monitor")
    DB.create_collection("monitor", {'capped': True, 'max':1000000, 'size':100*1024*1024})

# Index for reading a task's progress records in chronological order.
DB.progress.ensure_index([('task_id', pymongo.ASCENDING),
                          ('time',    pymongo.ASCENDING)])

# Index backing the per-machine task scan in docycle().
DB.tasks.ensure_index([('machine_id', pymongo.ASCENDING),
                       ('status',     pymongo.ASCENDING)])


# Tasks currently owned by this factotum:
#   task _id -> (task document, asyncproc.Process, time of last progress report)
MYTASKS = {}



def progress(task, **kw):
    """Save one progress document for *task* into DB.progress.

    *task* is either a task document (dict with '_id') or an ObjectId;
    kw must carry 'stdout' and 'stderr'.  BSON documents are limited to
    4 MB, so oversized output is split: the first megabyte of each
    stream is reported immediately and the remainder is reported by a
    recursive call.
    """
    cap = 1024*1024
    out, err = kw['stdout'], kw['stderr']

    if len(out) > cap or len(err) > cap:
        chunk = kw.copy()
        chunk['stdout'], chunk['stderr'] = out[:cap], err[:cap]
        progress(task, **chunk)
        chunk['stdout'], chunk['stderr'] = out[cap:], err[cap:]
        progress(task, **chunk)
        return

    if isinstance(task, dict):
        task = task['_id']
    if not isinstance(task, ObjectId):
        raise TypeError
    kw['task_id'] = str(task)
    kw['time'] = time.time()
    DB.progress.save(kw)


def docycle():
    """Main worker loop; never returns.

    Each second it may: (1) save a sysinfo snapshot to DB.monitor every
    PERIOD_SEND_SYSINFO seconds, (2) every PERIOD_NEW_TASK_CHECK seconds
    reconcile MYTASKS against DB.tasks for this machine -- start 'new'
    tasks, mark unknown 'taken' tasks as 'lost', kill local tasks the DB
    no longer lists as 'taken' -- and (3) pump stdout/stderr of running
    tasks into DB.progress and reap the ones that exited.
    """
    last_sysinfo = 0
    last_check = 0
    while True:
        now = time.time()

        # Periodic machine snapshot.
        if now >= last_sysinfo+PERIOD_SEND_SYSINFO:
            last_sysinfo = now
            si = collect_sysinfo()
            #print si
            DB.monitor.save(si)
            #print DB.monitor.count()

        if now >= last_check+PERIOD_NEW_TASK_CHECK:
            last_check = now

            # Copy of the live set; entries the DB still marks 'taken' are
            # removed below, so whatever is left over must be terminated.
            TASKSET = MYTASKS.copy()

            for task in DB.tasks.find({'machine_id' : MACHINE_ID}):

                if task['status']=='new':
                    if (task.get('cmd','') or task.get('binary','')):

                        # NOTE(review): a task with only 'binary' (no 'cmd')
                        # passes the test above but is never started nor
                        # marked 'taken', so it is re-scanned every cycle --
                        # confirm whether the binary path is intentionally
                        # unimplemented.
                        if task.get('cmd',''):
                            proc = Process( task['cmd'],
                                            env = task.get('env', {}) or None, # empty string -> None
                                            cwd = task.get('cwd', '') or DEFAULT_DIR, # empty string -> None
                                            stdin = None,
                                            shell = True)
                            MYTASKS[task['_id']] = (task, proc, now)

                            task['status'] = 'taken'
                            task['starttime'] = now
                            DB.tasks.save(task)
                elif task['status']=='taken':
                    try:
                        del TASKSET[ task['_id'] ]
                    except KeyError:
                        # DB has info about some live tasks which are not in MYTASKS, report it
                        task['status'] = 'lost'
                        DB.tasks.save(task)

            # task are not in 'taken' (or even erased from DB) then kill and forget it
            for task, proc, _ in TASKSET.itervalues():
                proc.terminate()
                #print 'pid=', proc.pid()
                #os.system("ps -o pid,ppid,pgrp,cmd")
                del MYTASKS[task['_id']]


        print now, [task['cmd'] for task, _, _ in MYTASKS.values()]

        #    if len(MYTASKS)==0:
        #        break

        # Pump output / reap exit status of every task we own.
        # (.values() returns a list copy in Python 2, so deleting entries
        # from MYTASKS inside the loop is safe.)
        for task, proc, lastreport in MYTASKS.values():
             # check to see if process has ended
            exitcode = proc.wait(os.WNOHANG)
            if exitcode==None: # still working
                out, err = proc.readboth()
                #print len(out), len(err)
                # todo: report numthreads, numchildren, memory, ...
                # Report whenever there is fresh output, or at least every 10 s.
                if out or err or time.time() >= lastreport+10:
                    MYTASKS[task['_id']] = (task, proc, now)
                    progress(task, what='working',
                                   stdout = out,
                                   stderr = err)
            else:
                # Process ended: flush remaining output, record exit code.
                out, err = proc.readboth()
                progress(task, what='finished',
                               exitcode=exitcode,
                               stdout = out,
                               stderr = err)
                # NOTE(review): 'status' becomes the integer exit code here,
                # unlike the string states 'new'/'taken'/'lost' -- confirm
                # consumers expect that.
                task['status'] = exitcode
                task['endtime'] = now
                DB.tasks.save(task)
                del MYTASKS[task['_id']]

        time.sleep(1)


        # TODO: periodically take process metrics (memory, network activity, etc) and report them



os.setpgrp() # create new process group, become its leader
assert os.getpgrp() == os.getpid()
# Redirect os._exit (used by paste.reloader to restart) so the whole
# process group -- including spawned tasks -- is torn down first.
# NOTE(review): genocide() is not defined or imported anywhere in this
# file; as written this lambda would raise NameError when the reloader
# fires -- confirm genocide is provided at runtime.
os._exit = lambda _: genocide() # for paste.reloader
import atexit
# On normal interpreter exit, kill everything in our process group so no
# orphaned task processes survive.
atexit.register(lambda: os.killpg(0, signal.SIGKILL))

docycle()
