#! /usr/bin/python

####
# This is the "master" for jobs dispatching. Eventually it should do the following:
# -. reads in and parse the configuration file;
# -. constructs a job queue; 
# -. listens over a socket port for connection requests from "slaves";
# -. whenever a slave requests work, assigns a brain-pair job from the dispatch queue;
# -. whenever a slave finishes, does logging/cleaning; and
# -. whenever the user asks, reports the current progress or dumps the partial computation result.
#
# (Slave management, job management, logging and progress reporting)
#### 

# use __future__ to run in Python 2.5.X 
from __future__ import with_statement 
import sys  
import os
import socket
import ConfigParser
import logging
import Queue
import subprocess

###### message between "master" and "slaves" #####
 
###### parameter parsing #####
if len(sys.argv) < 2:
	print 'usage'
	print '-----'
	print ''
	print 'master config_file'
	print ' '
	print 'Inputs:'
	print '-------'
	print 'The configuration file that discribes the jobs and the dispatching setup.'
	print 'For more information please refer to the in-line comments in the example.conf.'
	sys.exit(1)


###### read in configuration file ##### 
config=ConfigParser.SafeConfigParser()
config_file = os.path.abspath(os.path.expanduser(sys.argv[1]))
if os.path.exists(config_file):
    config.read(config_file)
else:
    print 'config_file does not exist'
    sys.exit(1)

try:
    id = config.get('MASTER','id')
except ConfigParser.NoOptionError:
    id = config_file.split('/')[-1].split('.conf')[0]  

working_dir = os.path.expanduser(config.get('MASTER','working_directory'))
os.chdir(working_dir)

###### preparing log facility #####
# TODO: use logging.config functionality so that all the setting can be read and changed
# in the configuration file

# The log file name defaults to the config file name (minus '.conf') when
# the [MASTER] section does not name one explicitly.
try:
    master_log_file = config.get('MASTER','log_file')
except ConfigParser.NoOptionError:
    master_log_file = config_file.split('/')[-1].split('.conf')[0] 
master_log_file = working_dir+'/'+master_log_file+'.log' 

log_level = config.getint('MASTER', 'log_level')
logger = logging.getLogger("master")
log_format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")

# The file handler records everything the master logs; the file is
# truncated ("w") on every fresh start.
file_handler = logging.FileHandler(master_log_file, "w")
file_handler.setLevel(log_level)
file_handler.setFormatter(log_format)
logger.addHandler(file_handler)

# In debugging mode (INFO or below), also display the log on stdout.
if log_level <= logging.INFO:
    console_handler = logging.StreamHandler()
    console_handler.setLevel(log_level)
    console_handler.setFormatter(log_format)
    logger.addHandler(console_handler)

logger.setLevel(log_level)
logger.debug("master started ...")

###### create jobs queue #####
list1=config.get('MASTER','list1').split(',')
list1=map(lambda image: image.strip(), list1)
list2=config.get('MASTER','list2').split(',')
list2=map(lambda image: image.strip(), list2)

job_queue=Queue.Queue()
for img1 in list1:
    for img2 in list2:
        job_queue.put((img1,img2))

working_on_queue=dict()   # list of jobs dispatched to the "slaves" but not yet finished
                          # elements in this queue should be a pair (slave, job). 

done_queue=Queue.Queue()        # list of jobs finished

fails = dict() # records job failurs, elements in this table if of the form (job, number of fails)
something_wrong_queue=Queue.Queue()  # list of jobs that is known not finished properly

###### handle slave requests #####
# It is probably a fancy way to deal with multiple "slaves" by threads, but to make the
# development/debug simple, I will use a less fancy solution
listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = config.get('MASTER', 'host')
port = config.getint('MASTER', 'port')
ntry = config.getint('MASTER', 'ntry')

# Probe up to ntry consecutive ports starting at the configured one; the
# first port that binds successfully is written back into the config file
# so that the slaves can find the master.
attempts = 0
while True:
    try:
        listening_socket.bind((host, port))
    except socket.error:
        logger.debug("tried port: %d, does not work", port)
        attempts += 1
        # BUG FIX: this branch was tab-indented while the rest of the file
        # uses spaces -- fragile under `python -tt` and a syntax error in
        # newer interpreters; indentation is normalized to spaces.
        if attempts >= ntry:
            raise  # ran out of candidate ports -- give up
        else:
            port += 1
    else:
        # advertise the chosen port through the shared configuration file
        config.set('MASTER', 'port', str(port))  # str(port), not port.__str__()
        with open(config_file, 'wb') as config_fd:
            config.write(config_fd)
        break
logger.debug("master chose port: %d", port)

# the "master" is supposed to run forever, and only terminated by user signals
# TODO: future plan of using message to "master"
# -. dump out current progress
# -. reread configuration file
# -. ...

while True:
    listening_socket.listen(40) # 40 is the upper bound of how many "slaves" can connect to the "master"
    logger.debug("master is listening ...")
    conn, slave = listening_socket.accept()
    logger.debug("slave %s is knocking ...", socket.gethostbyaddr(slave[0])[0])

    # here I should test if the connection request is indeed from one of the "slave".
    #if slave[0] not in slaves_list: 
    #    logger.debug("%s is not a valid slave.", slave[0]) 
    #    conn.close()
    #    continue
    
    # read in the message from the connecting "slave" and see what he wants
    msg = conn.recv(1024)
    logger.debug("receiving message: %s", msg) 
    
    if msg == "IMAGE_FILE_NOT_EXIST": 
        # The job itself is broken (an input image is missing): fatal for
        # the whole run, so log it and abort the master.
        job = working_on_queue.pop(slave[0])
        conn.close() 
        logger.critical("%s reports that job (%s, %s) is not well formulated.", socket.gethostbyaddr(slave[0])[0], job[0], job[1])
        # BUG FIX: Exception does not %-interpolate extra arguments the way
        # logging does, so the message is formatted explicitly here.
        raise Exception("%s reports that job (%s, %s) is not well formulated."
                        % (socket.gethostbyaddr(slave[0])[0], job[0], job[1]))

    elif msg == "STATISTICS":
        # user is asking for the job statistics, report back
        nLeft = job_queue.qsize()
        nWorkingOn = len(working_on_queue)
        nDone = done_queue.qsize()
        nWrong = something_wrong_queue.qsize()
        logger.info("%d jobs done \n", nDone)
        logger.info("%d jobs left \n", nLeft)
        logger.info("%d jobs failed \n", nWrong)
        logger.info("%d jobs being worked on \n", nWorkingOn)
        for (s, j) in working_on_queue.items():
            logger.info("%s is working on warping %s.", socket.gethostbyaddr(s)[0], j)
        for job in something_wrong_queue.queue: 
            logger.info("%s failed.", job)
        conn.close() 
        continue

    elif msg == "LAUNCH_SLAVES":
        # launch or re-launch slaves listed in the (re-read) configuration file
        config.read(config_file)
        slaves_list = config.get('MASTER', 'slaves').split(',')
        slaves_list = map(lambda name: socket.gethostbyname(name.strip()), slaves_list)  # convert to IP addresses
        working_slaves_list = working_on_queue.keys()
        null_fd = open("/dev/null", 'w')
        for slave in slaves_list:
            if slave in working_slaves_list:
                continue  # this slave is already busy; do not launch a second one
            try:
                subprocess.Popen(["ssh", slave, "slave.py", config_file], stdout=null_fd, stderr=null_fd)
            except ValueError:
                logger.info("%s is not launched.", slave)
            else:
                logger.info("%s is launched successfully.", slave)
        conn.close() 
        continue

    elif msg == "RELOAD_JOBS":
        # load newly added jobs: the cross product of the two image lists
        list1 = config.get('MASTER', 'list1').split(',')
        list1 = map(lambda image: image.strip(), list1)
        list2 = config.get('MASTER', 'list2').split(',')
        list2 = map(lambda image: image.strip(), list2)

        for img1 in list1:
            for img2 in list2:
                job_queue.put((img1, img2))

        conn.close() 
        continue
    

    # in the following cases, new job should be assigned
    elif msg == "CANNOT_CREATE_DIR": 
        job = working_on_queue.pop(slave[0])
        fails[job] = fails.get(job, 0) + 1
        # BUG FIX: the original called socket.getbostbyaddr (a typo) which
        # raised AttributeError whenever this branch was taken.
        logger.error("%s could not start the warping %s", socket.gethostbyaddr(slave[0])[0], job)
        logger.info("%s has failed %d time(s)", job, fails[job])
        if fails[job] >= 5:
            # give up on a job after 5 failures
            something_wrong_queue.put(job)
            fails.pop(job)
        else:
            job_queue.put(job)  # requeue for a retry

    elif msg == "JOB_FAILED":  
        job = working_on_queue.pop(slave[0])
        fails[job] = fails.get(job, 0) + 1
        logger.error("%s started the warping %s, but could not finish", socket.gethostbyaddr(slave[0])[0], job)
        logger.info("%s has failed %d time(s)", job, fails[job])
        if fails[job] >= 5:
            something_wrong_queue.put(job)
            fails.pop(job)
        else:
            job_queue.put(job)

    elif msg == 'JOB_DONE': 
        try:
            job = working_on_queue.pop(slave[0])
        except KeyError: # this slave is asking for his first job
            pass
        else:
            done_queue.put(job)

    else:
        # BUG FIX: msg is a string, so %s must be used here (the original's
        # %d made the formatting fail for any unrecognized message).
        logger.debug("%s can be used in the future.", msg)
        ############################## SPACE HOLDER: ############################################ 
        # Customized code could be filled in here to handle the return value from the "slaves" .
        # For example, store the return values in a file (or matrix)
        #########################################################################################

    if job_queue.empty():
        # inform the "slave"
        logger.debug("no more job left, waiting for slaves to finish.")
        conn.sendall('NO_MORE_JOB')  # NO_MORE_JOB
    else:
        # send a new job to the "slave"
        job = job_queue.get()  # job is supposed to be of the form ("image1","image2")
        conn.sendall(job[0] + ',' + job[1])
        logger.info("sending job %s to slave %s", job, socket.gethostbyaddr(slave[0])[0])
        # save the (job, slave) pair to the working_on_queue
        working_on_queue[slave[0]] = job
    conn.close()

###### clean up and finish #####
# NOTE(review): this section is unreachable -- the dispatch loop above is
# `while True` with no break, so the master only ever exits via an
# exception or a user signal.
listening_socket.close()

############################## SPACE HOLDER: ############################################ 
# Customized code could be filled in below to handle the return value from the "slaves" .
# For example, close files or save the matrix 
#########################################################################################



