#############################################################################
# Copyright (c) 2009 Dmitry Grushin <dgrushin@gmail.com>.
#
# This file is part of clupower (cluster power manager utility)
#
# Clupower is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Clupower is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Clupower.  If not, see <http://www.gnu.org/licenses/>.
#
# Contributors:
#    Dmitry Grushin <dgrushin@gmail.com> - initial API and implementation
#############################################################################

require 'cputil'
require 'monitor'

# Job states (as reported by qstat):
# C - Job is completed after having run.
# E - Job is exiting after having run.
# H - Job is held.
# Q - Job is queued, eligible to run or routed.
# R - Job is running.
# T - Job is being moved to a new location.
# W - Job is waiting for its execution time.
# S - (Unicos only) Job is suspended.
class JobProperties
  attr_accessor :nodes, :tasks, :walltime, :submit_time, :state, :interactive, :id

  # Returns the number of seconds this job has been waiting in the queue.
  # We assume that qstat is executed on the same machine as our script,
  # so the local clock is directly comparable with the submit timestamp.
  def get_qwait
    Time.now.to_i - @submit_time
  end

  # One-line human-readable summary of the job's properties.
  def to_s
    head = "(Id=#{@id};Width=#{@nodes};Tasks=#{@tasks};Walltime=#{Utils.human_time(@walltime)};"
    tail = "Int=#{@interactive};Waits=#{Utils.human_time(get_qwait)})"
    head + tail
  end
end

# A class that holds a list
# of jobs waiting for nodes to power on.
class JobsWaitNodes
  include MonitorMixin

  # jobs  - Array of job ids that were put on hold until their nodes power on
  # nodes - Array of node names being powered on for these jobs
  def initialize(jobs, nodes)
    super()
    @jobs = jobs
    @nodes = nodes
    @done = false
  end

  # Waits in a new thread for the set of nodes to
  # power on (get free status in torque). Each job
  # is then cleared from hold status. If nodes are not powered on
  # until timeout jobs will take part in next power management
  # decision.
  # Returns self so the caller can keep the object for done/join.
  def wait
    @thread = Thread.new do
      sleep_total = 0
      timeout = Settings.instance.get('WAKEUP_TIME') * 2
      # Default so the timeout log below never prints an empty reason,
      # even if neither wait loop ran (e.g. timeout is misconfigured to 0).
      reason = 'unknown'
      # Phase 1: wait until every requested node reports free in torque.
      until sleep_total >= timeout || Cmd.instance.get_free_nodes & @nodes == @nodes
        sleep_total += sleep(5)
        reason = "Not all nodes are online: #{(@nodes - Cmd.instance.get_free_nodes).join(',')}"
      end
      # Phase 2: if all nodes are free we check if gm mapper is started
      bad_nodes = []
      until sleep_total >= timeout || Cmd.instance.is_gm_mapper_started(@nodes, bad_nodes)
        sleep_total += sleep(5)
        reason = "Myrinet mapper has not started on nodes #{bad_nodes.join(',')}"
      end
      # If nodes and mapper are ok clear hold status from jobs
      # Some jobs may have been deleted when we were waiting, so check the queue
      # once more and skip deleted jobs.
      if sleep_total < timeout
        existing_jobs = Cmd.instance.get_queued_jobs & @jobs
        existing_jobs.each { |job| release_hold(job) }
      else
        CPMonitor.instance.log_message("Timeout waiting for nodes - " +
            "jobs #{@jobs.inspect}, nodes #{@nodes.inspect}. Reason: #{reason}.")
      end
      synchronize { @done = true }
    end
    return self
  end

  # Removes job hold. Failures are logged, not raised, so one bad job
  # does not prevent the remaining jobs from being released.
  def release_hold(job)
    Cmd.instance.release_job_hold(job)
    CPMonitor.instance.log_job_release(job, @nodes)
  rescue CPRuntimeError => e
    CPMonitor.instance.log_error("Error occurred while trying to release job hold #{job}: #{e.message}. " +
        "Job will stay in queue for the next PM iteration.")
  end

  # Whether the background wait thread has finished.
  # Read under the monitor lock: @done is written by the worker thread
  # inside synchronize, so the read must be synchronized as well.
  def done
    synchronize { @done }
  end

  # Wait for the thread to stop.
  # Rescue StandardError (not Exception) so signals and exit are not swallowed.
  def join
    CPMonitor.instance.log_message("Wait list completion: #{@jobs.inspect} #{@nodes.inspect}")
    @thread.join
  rescue StandardError => e
    CPMonitor.instance.log_error("In JobsWaitNodes.join: #{e.message}")
  end

  attr_accessor :jobs, :nodes
end
