"""
Created on 10-Dec-2009

@author: dexen
"""

from dexen.system import util
from dexen.system.base import service
from dexen.system.base.spec import DexenSpec
from dexen.system.base.task import OutputUnit, InputUnit, TaskInfo
from dexen.system.node.master.ind_mgr import IndMgr
from dexen.system.node.master.taskq_mgr import TaskQMgr, TaskState
from dexen.system.server.info import ExecInfo, SlaveInfo
import logging
import multiprocessing as mp
import os
import rpyc
import sys
import time



#===============================================================================
# Globals
#===============================================================================
# Module-level logger; stays None until SlaveMgr.__init__ configures it inside
# the scheduler child process (a second global, slv_logger, is also bound there
# via a `global` statement).
logger = None
    

#===============================================================================
# Slave Proxy
#===============================================================================
class SlaveProxy(object):
    """
    Master-side handle for one remote slave.

    Wraps the rpyc connection to a slave node and tracks the slave's state
    (idle / executing / crashed) via a SlaveInfo object.  An EOFError on any
    remote call is interpreted as a crash of the slave itself.
    """

    def __init__(self, name, spec, addr):
        """
        name -- registry name of the slave.
        spec -- DexenSpec shipped to the slave via root.run().
        addr -- (host, port) of the slave's rpyc server.
        """
        self.name = name
        self.info = SlaveInfo(name)
        self.input = None   # last InputUnit handed to the slave
        self.output = None  # last OutputUnit obtained from the slave
        try:
            self.conn = rpyc.connect(addr[0], addr[1],
                                     config=service.default_config)
            self.conn.root.run(spec)
        except EOFError:
            # Connection died while setting the slave up.
            self._crashed()

    def is_crashed(self):
        return self.info.is_crashed()

    def is_idle(self):
        return self.info.is_idle()

    def is_executing(self):
        return self.info.is_executing()

    def _crashed(self):
        # Record the crash; the caller is expected to recover the inds.
        self.info.crashed()

    def execute(self, input_unit):
        """Send input_unit to the slave and mark this proxy as executing."""
        assert isinstance(input_unit, InputUnit)
        try:
            self.input = input_unit
            self.info.executing()
            self.conn.root.put(input_unit) # Might need id to match the tasks!
        except EOFError:
            self._crashed()

    def update(self):
        """
        Poll the slave for a result.

        A non-None result means the task completed -- either the script
        finished successfully or the *script* crashed (that is recorded in
        the OutputUnit and is distinct from a slave crash, which surfaces
        here as EOFError).
        """
        try:
            output = self.conn.root.get()
            # Copy the netref result by value; beware output can be None
            # while the task is still running.
            self.output = rpyc.classic.obtain(output)
            if self.output is not None:
                self.info.idle()
        except EOFError:
            self._crashed()

    def get_info(self):
        return self.info

    def get_output(self):
        return self.output

    def get_input(self):
        return self.input
     
#===============================================================================
# Slave Manager
#===============================================================================
class SlaveMgr(mp.Process):
    """
    Master-side scheduler process.

    Runs the whole job: executes the init task locally to seed the task
    queues, then repeatedly hands InputUnits to idle slaves (via SlaveProxy),
    collects their OutputUnits, and stops once the configured number of
    births is reached.  On shutdown it writes pareto.csv (surviving
    individuals) and progress.csv (per-interval score statistics).
    """

    def __init__(self, spec, comm, reg_addr):
        """
        spec     -- DexenSpec describing the job.
        comm     -- shared communication object; reports slave state to the
                    server and yields names of idle slaves.
        reg_addr -- (host, port) of the registry mapping slave names to
                    their rpyc addresses.
        """
        mp.Process.__init__(self)
        self.spec = spec
        assert isinstance(self.spec, DexenSpec)
        self.comm = comm
        self.reg_addr = reg_addr
        self.slaves = {}  # slave_name -> SlaveProxy
        self.ind_mgr = IndMgr(self.spec)
        self.tq_mgr = TaskQMgr(self.ind_mgr)
        self.transact_id = 1  # stamped onto every InputUnit that is sent out

        # The loggers are module globals because this __init__ runs in the
        # scheduler child process and other code in this module uses them.
        global logger, slv_logger
        logger = logging.getLogger("SlaveMgr")
        print("Current directory in slavemgr process is  %s" % os.getcwd())
        if os.path.exists("output.csv"):
            os.remove("output.csv")  # fresh CSV log for every run
        handler = logging.FileHandler("output.csv")
        fmt = logging.Formatter("%(name)s,%(levelname)s,%(message)s,%(asctime)s", "%H:%M:%S")
        handler.setFormatter(fmt)
        handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)

        # Emit the CSV header line with a bare formatter, then restore the
        # regular row formatter.
        handler.setFormatter(logging.Formatter("%(message)s"))
        msg = "name,levelname," + self.tq_mgr.get_header() + "," +\
              "num_births,task_name,dir,slave_name,trans_id,time"
        logger.debug(msg)
        handler.setFormatter(fmt)

        slv_logger = logging.getLogger("Slaves")
        if os.path.exists("slaves_out.log"):
            os.remove("slaves_out.log")
        handler = logging.FileHandler("slaves_out.log")
        fmt = logging.Formatter("%(message)s")
        handler.setFormatter(fmt)
        handler.setLevel(logging.DEBUG)
        slv_logger.addHandler(handler)
        slv_logger.setLevel(logging.DEBUG)

    def _setup_schema(self):
        """Unzip the job's schema into masterdir and put it on sys.path."""
        sys.path.append(self.spec.masterdir)
        util.unzip(self.spec.schema_zip, self.spec.masterdir) # TODO: consider folder structure

    def _init_job(self):
        """
        Execute the init task locally and feed its individuals into the
        queues.  This method kickstarts the job execution.
        """
        masterdir = self.spec.masterdir
        task = self.spec.init_task
        assert isinstance(task, TaskInfo)
        # assert task is init type TODO:
        # The init script may rely on relative paths, so run it from masterdir.
        prevpath = os.getcwd()
        os.chdir(masterdir)
        obj = task.get_obj()
        inds = obj.__initialize__()
        self.tq_mgr.set_initial_inds(task.name, inds)
        os.chdir(prevpath)

    def _recover(self, slave):
        """
        Put a crashed slave's in-flight individuals back on the queue and
        report its state to the server.  Called from three places: the
        first connection (proxy creation), execute, and schedule.
        """
        input = slave.input
        # input is None only when the crash occurred during proxy creation,
        # i.e. before any unit was sent to the slave.
        if input is not None:
            assert isinstance(input, InputUnit)
            self.tq_mgr.recover_inds(input.task_name, input.inds)
        slave_info = slave.get_info()
        self.comm.update_slave(slave_info)

    def _get_slave_addr(self, slave_name):
        """Look up slave_name's (host, port) in the registry."""
        conn = rpyc.connect(self.reg_addr[0], self.reg_addr[1])
        try:
            return conn.root.get_slave_addr(slave_name)
        finally:
            # Always release the registry connection, even if the call fails.
            conn.close()

    def _schedule(self, slave_name, input_unit):
        """Send input_unit to the named slave, creating its proxy lazily."""
        assert isinstance(input_unit, InputUnit)
        if slave_name not in self.slaves:
            addr = self._get_slave_addr(slave_name)
            slave = SlaveProxy(slave_name, self.spec, addr)
            if slave.is_crashed():
                self._recover(slave)
            self.slaves[slave_name] = slave

        slave = self.slaves[slave_name]
        assert isinstance(slave, SlaveProxy)

        slave.execute(input_unit)
        if slave.is_crashed():
            self._recover(slave)

    # TODO: cater for crashes
    def _update_slaves(self):
        """Poll every executing slave and process finished/crashed ones."""
        for slave in self.slaves.values():
            assert isinstance(slave, SlaveProxy)

            if not slave.is_executing():
                continue

            slave.update()

            if slave.is_executing():
                # After querying the slave, it still hasn't finished
                # executing the task.
                continue

            if slave.is_crashed():
                # This is the crash of the slave itself; a crash of the
                # *script* is reported inside the OutputUnit and handled in
                # the idle branch below.
                print("%s crashed" % slave.name)
                self._recover(slave)
                # BUGFIX: _log takes four arguments; the transaction id of
                # the lost unit was previously missing (TypeError on death).
                self._log(slave.input.task_name, "died", slave.name,
                          slave.input.transact_id)

            if slave.is_idle():
                # The proxy has just obtained results pending processing.
                o_unit = slave.output
                i_unit = slave.input
                assert isinstance(o_unit, OutputUnit)
                # TODO: you may want to analyze the contents of o_unit
                # (e.g. crashed or not, execution time, etc.)

                print("%s has become idle %s" % (slave.name, time.time()))

                exec_info = ExecInfo(o_unit.task_name)

                if o_unit.is_crashed():
                    # The script crashed on the slave: recover the inds so
                    # they can be rescheduled.
                    exec_info.crashed()
                    self.tq_mgr.recover_inds(i_unit.task_name, i_unit.inds)
                    self._log(o_unit.task_name, "crashed", o_unit.slave_name, o_unit.transact_id)
                else:
                    self.tq_mgr.set_inds(o_unit)
                    self._log(o_unit.task_name, "received", o_unit.slave_name, o_unit.transact_id)
                    exec_info.set_exec_time(o_unit.get_exec_time())

                print("The current num birth is %s" % self.ind_mgr.n_cur_births())
                self._log_slave(o_unit)
                slave_info = slave.get_info()
                assert isinstance(slave_info, SlaveInfo)
                slave_info.exec_info = exec_info
                self.comm.update_slave(slave_info)

    def _log_slave(self, o_unit):
        """Append the raw OutputUnit to the slaves log."""
        assert isinstance(o_unit, OutputUnit)
        slv_logger.debug(o_unit)

    def _log(self, t_name, direction, slv_name, trans_id):
        """Append one CSV row: queue states, births, task, event, slave, id."""
        t_states_msg = self.tq_mgr.get_states_msg()
        n_births = self.ind_mgr.n_cur_births()
        msg = t_states_msg + "," + str(n_births) + "," + t_name + "," + \
              direction + "," + slv_name + "," + str(trans_id)
        logger.debug(msg)

    def run(self):
        """Process entry point: schedule until max_num_births, then dump stats."""
        self._setup_schema()
        self._init_job()

        while True:
            self._update_slaves()

            if not self.tq_mgr.has_inds():
                continue
            if not self.comm.has_unit():
                self.comm.has_unit(True)
            slave_name = self.comm.get_slave()
            if not slave_name:
                continue

            in_unit = self.tq_mgr.get_inds()
            in_unit.transact_id = self.transact_id
            self.transact_id += 1

            self._schedule(slave_name, in_unit)
            self._log(in_unit.task_name, "sent", slave_name, in_unit.transact_id)

            if self.ind_mgr.n_cur_births() >= self.spec.max_num_births:
                break

        inds = self.ind_mgr.get_all_inds()
        max_id = self._dump_pareto(inds)
        self._dump_progress(inds, max_id)

    def _dump_pareto(self, inds):
        """
        Write pareto.csv with one row per evaluated individual and return
        the largest individual id seen (-1 if none were evaluated).
        """
        print("curdir is %s" % os.getcwd())
        max_id = -1
        with open("pareto.csv", "w") as f:
            f.write("id,daylight,energy,cost,birth_id,death_id\n")
            for ind in inds:
                if ind.score_daylight is None:
                    continue  # never evaluated
                if max_id < ind.__id__:
                    max_id = ind.__id__
                if ind.__death_id__ is None:
                    # Still alive at the end of the run.
                    ind.__death_id__ = self.spec.max_num_births+1
                msg = str(ind.__id__) + "," +\
                      str(round(ind.score_daylight,2)) + "," +\
                      str(round(ind.score_energy,2)) + "," +\
                      str(round(ind.score_cost,2)) + "," +\
                      str(ind.__birth_id__) + "," +\
                      str(ind.__death_id__) + "\n"
                f.write(msg)
        return max_id

    def _dump_progress(self, inds, max_id):
        """
        Write progress.csv: min/avg/max of each score over the population
        alive at ids 1, 101, 201, ... up to max_id.
        """
        step = 100
        INF = 1000000000

        def calc(cur_id, score_name):
            """Return (min, avg, max) of score_name over inds alive at cur_id."""
            lo = INF
            hi = -INF
            total = 0.0
            cnt = 0.0
            for ind in inds:
                # Alive at cur_id: born no later than it, died after it.
                if ind.__birth_id__ <= cur_id and ind.__death_id__ > cur_id:
                    score = getattr(ind, score_name)
                    total += score
                    cnt += 1
                    if score < lo:
                        lo = score
                    if score > hi:
                        hi = score
            # Guard the previously possible ZeroDivisionError on an empty window.
            avg = total / cnt if cnt else 0.0
            return lo, avg, hi

        score_names = ["score_daylight", "score_energy", "score_cost"]
        with open("progress.csv", "w") as f:
            f.write("id,min_d,avg_d,max_d,min_e,avg_e,max_e,min_c,avg_c,max_c\n")
            cur_id = 1
            while cur_id < max_id:
                msg = str(cur_id) + ","
                for score_name in score_names:
                    lo, avg, hi = calc(cur_id, score_name)
                    msg += str(round(lo,2)) + "," +\
                           str(round(avg,2)) + "," +\
                           str(round(hi,2)) + ","
                msg = msg[:-1] + "\n"
                f.write(msg)
                cur_id += step
