require 'commands'
require 'cpmonitor'
require 'set'
require 'monitor'
require 'cputil'
require 'algo'

# Power-management algorithm: keeps just enough nodes online to cover the
# number of cores requested by jobs currently waiting in the queue.
class MIN_WIDTH < PMA

  # Short human-readable description of this algorithm.
  def self.get_desc
    "Nodes will be powered on if there are tasks in the queue. " +
      "More tasks in the queue - more active nodes."
  end

  #
  # Performs power management decision
  #
  # arguments:
  # sleep_nodes - array of nodes that were powered off by the manager
  # node_wait_list - array of JobsWaitNodes objects
  #
  # Returns a PMDecision describing which nodes to switch on/off and which
  # jobs to unlock.
  def manage(sleep_nodes, node_wait_list)
    decision = PMDecision.new
    width_required = 0
    job_list = Cmd.instance.get_queued_jobs
    unless job_list.empty?
      CPMonitor.instance.log_message("#{job_list.size} waiting jobs in queue")
      job_list.each do |job|
        info = Cmd.instance.get_job_details(job)
        CPMonitor.instance.log_job_wait_time(job, info.get_qwait)
        # Jobs already waiting for specific nodes must not request more cores.
        if (wl = job_in_wait_list(job, node_wait_list))
          CPMonitor.instance.log_message("Skipping job #{job}, it waits for nodes #{wl.nodes.inspect}")
        else
          width_required += require_cores(info, job, decision)
        end
      end
    end
    power_on(width_required, decision, sleep_nodes, node_wait_list)
    CPMonitor.instance.log_message("Total: #{decision.nodes_on.count} nodes to " +
        "switch on, #{decision.nodes_sleep.count} - off")
    decision
  end

  private

  # Marks +job+ for unlocking and returns the number of cores (tasks) it needs.
  def require_cores(job_info, job, decision)
    CPMonitor.instance.log_message("Need #{job_info.tasks} cores for job #{job_info}")
    decision.jobs_unlock << job
    job_info.tasks
  end

  # Returns the wait-list entry whose job list includes +job+, or nil.
  def job_in_wait_list(job, node_wait_list)
    node_wait_list.find { |wl| wl.jobs.include?(job) }
  end

  # Returns the wait-list entry whose node list includes +node+, or nil.
  def node_in_wait_list(node, node_wait_list)
    node_wait_list.find { |wl| wl.nodes.include?(node) }
  end

  # Decides which sleeping nodes to wake (when demand exceeds supply) or which
  # free nodes to power off (when supply exceeds demand), recording the result
  # in +decision+.
  def power_on(width_required, decision, sleep_nodes, node_wait_list)
    free_nodes = Cmd.instance.get_free_nodes
    # Either all currently offline nodes are usable, or only the ones this
    # manager powered off itself, depending on configuration.
    if Settings.instance.get('USE_EXISTING_OFF_NODES')
      sleep_node_avail = Cmd.instance.get_sleep_nodes
    else
      sleep_node_avail = Cmd.instance.get_sleep_nodes & sleep_nodes
    end
    # Convert the required core count into a node count, rounding up.
    ppn = Cmd.instance.get_cluster_ppn
    width_required = width_required / ppn + ((width_required % ppn != 0) ? 1 : 0)
    ignored = Cmd.instance.ignored_nodes_online
    on_count = width_required - free_nodes.size - ignored

    CPMonitor.instance.log_message("Need total #{width_required} nodes. " +
        "Have #{free_nodes.size} free nodes, "+
        "#{sleep_node_avail.size} offline nodes," +
        " #{ignored} ignored online nodes.")

    if on_count > 0
      # Switch sleeping nodes on
      sleep_node_avail.each do |node|
        break if on_count == 0
        decision.nodes_on << node
        on_count -= 1
      end
    elsif on_count < 0
      on_count = -on_count
      # Switch free nodes off except reserved and nodes in wait list.
      rnodes = Cmd.instance.get_reserved_nodes
      free_nodes.each do |node|
        break if on_count == 0

        # BUG FIX: each condition is now evaluated afresh per node via guard
        # clauses. Previously boolean flags leaked across loop iterations
        # (Ruby `for` shares one scope), so a node could be powered off even
        # though it was reserved or awaited; the reservation log line also
        # referenced an undefined variable `rs` and raised NameError.
        if (rs = rnodes[node])
          CPMonitor.instance.log_message("Leave node #{node} on " +
              "due to reservation #{rs}")
          next
        end

        if (wl = node_in_wait_list(node, node_wait_list))
          CPMonitor.instance.log_message("Leave node #{node} on " +
              "because jobs #{wl.jobs.inspect} are waiting for it.")
          next
        end

        if Cmd.instance.get_node_idle_time(node) < Settings.instance.get('MIN_WIDTH_IDLE_TIME').to_i
          CPMonitor.instance.log_message("Leave node #{node} on " +
              "because not enough idle time has passed.")
          next
        end

        decision.nodes_sleep << node
        on_count -= 1
      end
    end
  end
end
