#############################################################################
# Copyright (c) 2009 Dmitry Grushin <dgrushin@gmail.com>.
#
# This file is part of clupower (cluster power manager utility)
#
# Clupower is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Clupower is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Clupower.  If not, see <http://www.gnu.org/licenses/>.
#
# Contributors:
#    Dmitry Grushin <dgrushin@gmail.com> - initial API and implementation
#############################################################################

require 'rubygems'
require 'xmlsimple'
require 'settings'
require 'wol'
require 'wakeup'
require 'job'
require 'errors'
require 'open3'
require 'cpmonitor'
require 'singleton'

class Cmd
  include Singleton

  # Creates the singleton's state: the WakeUp helper used to power nodes
  # back on, and a map of node id => epoch seconds at which the node was
  # first seen free (drives idle-timeout decisions).
  def initialize
    @wakeup = WakeUp.new
    @free_nodes_idle_timer = {}
  end

  # Returns the number of seconds since the node became free, or 0 when
  # the node is not currently tracked in the idle-timer map.
  def get_node_idle_time(node)
    became_free_at = @free_nodes_idle_timer[node]
    return 0 unless became_free_at
    Time.now.to_i - became_free_at
  end

  # Refreshes @free_nodes_idle_timer against the current free-node list:
  # drops timers for nodes that are no longer free and starts timers for
  # nodes that have just become free. Still-free nodes keep their timers.
  def update_nodes_idle_timers
    # Call the sibling method directly instead of going through
    # Cmd.instance -- this IS the singleton instance (and every other
    # method in this class calls its helpers directly).
    current_free_nodes = get_free_nodes
    tracked_nodes = @free_nodes_idle_timer.keys

    # Nodes that stopped being free: forget their idle timers.
    (tracked_nodes - current_free_nodes).each do |node|
      @free_nodes_idle_timer.delete(node)
    end

    # Nodes that just became free: start their idle timers now.
    (current_free_nodes - tracked_nodes).each do |node|
      @free_nodes_idle_timer[node] = Time.now.to_i
    end
  end

  # Executes a shell command and returns its stdout as a string.
  #
  # Raises CPExecError when the command cannot be spawned, exits with a
  # non-zero status, or writes anything to stderr (original contract:
  # stderr output is treated as failure even on exit status 0).
  def execute_cmd(cmd)
    begin
      # capture3 reads both streams concurrently and reaps the child,
      # avoiding the pipe-buffer deadlock and leaked file descriptors of
      # the old popen3 + sequential-read approach, and gives us a real
      # Process::Status instead of racing on $?.
      out, err, status = Open3.capture3(cmd)
    rescue StandardError => e
      # Was "rescue Exception", which would also swallow SignalException
      # and SystemExit; StandardError still covers spawn failures (Errno::*).
      raise CPExecError.new(e)
    end
    raise CPExecError.new(err) if !status.success? || !err.empty?
    out
  end

  # Puts the node with the given ID offline: marks it offline in torque,
  # suspends it (skipped in SIMULATE mode) and logs the event.
  # Raises CPE11 when any step fails.
  def node_off (id)
    # Tell torque first so no new jobs land on the node.
    torque_node_offline(id)
    unless Settings.instance.get('SIMULATE')
      execute_on_node(id, Settings.instance.get('CMD_PM_SUSPEND'))
    end
    CPMonitor.instance.log_node_off(id)
  rescue CPRuntimeError => e
    raise CPE11.new(id).set_cause(e)
  end

  # Wakes up the node with the given ID (skipped in SIMULATE mode),
  # marks it online in torque and logs the event.
  # Raises CPE8 when any step fails.
  def node_on (id)
    unless Settings.instance.get('SIMULATE')
      @wakeup.wakeup(id)
    end
    # Tell torque that the node is available again.
    torque_node_online(id)
    CPMonitor.instance.log_node_on(id)
  rescue CPRuntimeError => e
    raise CPE8.new(id).set_cause(e)
  end

  # Marks the node offline in torque via "pbsnodes -o".
  # Raises CPE12 when the command fails.
  def torque_node_offline(id)
    execute_cmd("#{Settings.instance.get('CMD_PBSNODES')} -o #{id}")
  rescue CPExecError => e
    raise CPE12.new(id).set_cause(e)
  end

  # Clears the offline mark in torque via "pbsnodes -r".
  # Raises CPE9 when the command fails.
  def torque_node_online(id)
    execute_cmd("#{Settings.instance.get('CMD_PBSNODES')} -r #{id}")
  rescue CPExecError => e
    raise CPE9.new(id).set_cause(e)
  end

  # Nodes whose torque state includes "free".
  def get_free_nodes
    get_status_nodes("free")
  end

  # True when the given node is currently in the free-node list.
  def node_free?(node)
    get_free_nodes.include?(node)
  end

  # Nodes whose torque state includes "offline" (nodes we put to sleep).
  def get_sleep_nodes
    get_status_nodes("offline")
  end

  # Nodes whose torque state includes "down".
  def get_down_nodes
    get_status_nodes("down")
  end

  #
  # Returns the nodes that are used by active (already started or about
  # to start) reservations.
  #
  # Parses "showres -n" output: the first 4 lines are headers and the
  # last line is a summary, so only the lines in between hold node data.
  #
  # returns Hash(node id => reservation id)
  def get_reserved_nodes
    lines = execute_cmd(Settings.instance.get('CMD_SHOWRES') + " -n").split("\n")
    result = Hash.new
    # Guard against short/empty output: the slice is nil when there are
    # fewer than 4 lines (the old code crashed iterating over nil).
    data_lines = lines[4..lines.size - 2] || []
    data_lines.each do |line|
      tokens = line.strip.split
      node = tokens[0]
      res_id = tokens[2]
      start = tokens[5]
      # start is a [-]H:MM:SS offset relative to now; a leading "-"
      # means the reservation has already started. Accumulate the
      # absolute magnitude first, then restore the sign at the end.
      start_time = 0
      k = 3600
      start.split(":").each do |t|
        val = t.to_i
        val = val * -1 if t[0, 1] == "-" # only the first component carries the sign
        start_time = start_time + val * k
        k = k / 60
      end
      start_time = (start_time * -1) if (start[0, 1] == "-")
      # Only reservations already running or starting within the wakeup
      # window count as holding their nodes.
      if start_time < Settings.instance.get('WAKEUP_TIME') + Settings.instance.get('SLEEP_PAUSE')
        result[node] = res_id
      end
    end
    return result
  rescue CPExecError => e
    raise CPE20.new.set_cause(e)
  end

  #
  # Returns the list of jobs in the queue, parsed from qstat output
  # (its first two header lines are skipped). The job id is truncated
  # at the first "." because qstat prints "id.server".
  #
  # Returns [[job id, job status], ...]
  def get_queue
    raw_lines = execute_cmd(Settings.instance.get('CMD_QSTAT')).split("\n").drop(2)
    raw_lines.map do |line|
      fields = line.split(/\s+/)
      [fields[0].split(/\./)[0], fields[4]]
    end
  rescue CPExecError => e
    raise CPE7.new.set_cause(e)
  end

  # Returns an array of job ids whose status is 'Q' (queued).
  def get_queued_jobs
    get_queue.select { |job| job[1] == 'Q' }.map { |job| job[0] }
  end

  #
  # Executes a command on the given node via ssh and returns its stdout.
  #
  def execute_on_node(node_id, cmd)
    execute_ssh_cmd(node_id, cmd)
  end

  # Returns the number of cpus ("np") reported for the first node that
  # is not on the ignore list. Raises CPE14 when every node is ignored.
  def get_cluster_ppn
    first_real = parse_pbsnodes['Node'].find { |node| !node_ignored?(node['name'][0]) }
    raise CPE14.new unless first_real
    first_real['np'][0].to_i
  end

  # Total number of (non-ignored) nodes in the cluster.
  def get_cluster_size
    get_all_nodes.length
  end

  # Returns job properties (a JobProperties) for the given job id.
  #
  # Combines two sources: "qstat -fx" XML (state, submit time, node
  # count, interactive flag) and maui "checkjob -A" output (walltime
  # limit and task count).
  def get_job_details(job_id)
    job_data = parse_qstat(job_id)
    job = job_data['Job'][0]
    jp = JobProperties.new
    jp.id = job_id
    jp.state = job['job_state'][0]
    # ctime is the job creation (submission) time as an epoch value
    jp.submit_time = job['ctime'][0].to_i
    jp.nodes = job['Resource_List'][0]['nodect'][0].to_i
    # qstat only emits an "interactive" element for interactive jobs,
    # so its mere presence marks the job interactive
    jp.interactive = job['interactive'] != nil
    m_job_data = parse_checkjob(job_id)
    jp.walltime = m_job_data['WCLIMIT'].to_i
    jp.tasks = m_job_data['TASKS'].to_i
    return jp
  end

  # Releases the hold on the given job.
  # Returns true on success; raises CPE18 when the release command fails
  # or its output reports an ERROR.
  def release_job_hold(job)
    output = execute_cmd("#{Settings.instance.get('CMD_RELEASE_HOLD')} #{job}").strip
    raise CPE18.new(job, output) if output =~ /ERROR.+/
    true
  rescue CPExecError => e
    raise CPE18.new(job).set_cause(e)
  end

  # Returns true/false depending on whether the node chassis is powered
  # on, as reported by ipmitool ("System Power : on" on the first output
  # line). In SIMULATE mode the node's free status stands in for power.
  # Raises CPE21 when ipmitool fails.
  def chassis_powered?(node)
    return node_free?(node) if Settings.instance.get('SIMULATE')

    node_ipmi = ipmi_node_name(node)
    cmd = Settings.instance.get('CMD_IPMITOOL') +
        " -I lan -H #{node_ipmi} -U " +
        "#{Settings.instance.get('CMD_IPMITOOL_LOGIN')} -P " +
        "#{Settings.instance.get('CMD_IPMITOOL_PASSWORD')} chassis status"
    status_lines = execute_cmd(cmd).split("\n")
    # First line looks like "System Power         : on"
    field, value = status_lines[0].split(":").map { |part| part.strip }
    field == "System Power" && value == "on"
  rescue CPExecError => e
    raise CPE21.new(node_ipmi).set_cause(e)
  end

  # "node1.cluster.org" -> "node1"
  def short_node_name(node)
    node.split(".").first
  end

  # "node1.cluster.org" -> "node1-ipmi.cluster.org"
  def ipmi_node_name(node)
    head, *rest = node.split(".")
    ([head + "-ipmi"] + rest).join(".")
  end

  # Counts ignored nodes that are still online: returns 0 when all
  # ignored nodes are offline, otherwise the number of ignored nodes
  # whose state is exactly "free".
  def ignored_nodes_online
    get_all_nodes_with_ignored.count { |name, state| node_ignored?(name) && state == "free" }
  end

  # Returns the names of nodes whose (comma-separated) state string
  # includes at least one of the given status values.
  def get_status_nodes (*status)
    matching = get_all_nodes.select { |name, state| !(state.split(",") & status).empty? }
    matching.map { |name, _state| name }
  end

  # Returns the node's mac address, scraped from the first line of the
  # configured ifconfig command's output ("... HWaddr xx:xx:xx:xx:xx:xx").
  # Raises CPE3 when no HWaddr is present, CPE4 when ssh/ifconfig fails.
  def get_node_mac_address(node)
    first_line = execute_ssh_cmd(node, Settings.instance.get('CMD_IFCONFIG')).split("\n")[0]
    match = /.+HWaddr (.+)/.match(first_line.to_s)
    raise CPE3.new(node) unless match
    match[1].strip
  rescue CPExecError => e
    raise CPE4.new(node).set_cause(e)
  end

  # If cluster nodes have Myrinet cards we need to check that the gm/mx
  # mapper has seen every node before executing a job.
  #
  # nodes     - node names to check; names found in the mapper output are
  #             REMOVED from this array in place (callers can rely on the
  #             mutation -- original behavior kept).
  # bad_nodes - receives the nodes the mapper has not seen.
  #
  # Returns true when every node was found (or when no mapper command is
  # configured); false otherwise, or when the probe command itself fails.
  def is_gm_mapper_started(nodes, bad_nodes)
    gm_cmd = Settings.instance.get('CMD_MX_INFO')
    return true if gm_cmd.empty?
    gm_output = execute_ssh_cmd(nodes[0], gm_cmd).split("\n")
    gm_output.each do |line|
      # Each mapped host shows up as: "  N) xx:xx:xx:xx:xx:xx host:port  a,b"
      if line =~ /^\s+\d+\) ..:..:..:..:..:.. (.+):\d\s+\d,\d/
        nodes.delete($1.strip)
      end
    end
    # Parenthesized splat: the old bare "push *nodes" is ambiguous and warns.
    bad_nodes.push(*nodes)
    return nodes.empty?
  rescue CPExecError
    # Treat a failed probe as "mapper not ready" rather than raising.
    return false
  end

  # Returns the total number of seconds the node has been up (the first
  # whitespace-separated field of the configured uptime command's output).
  # Raises CPE23 when the ssh command fails.
  def get_uptime(node)
    uptime_output = execute_ssh_cmd(node, Settings.instance.get('CMD_GET_UPTIME'))
    uptime_output.split(" ")[0].to_i
  rescue CPExecError => e
    raise CPE23.new(node).set_cause(e)
  end

  # Returns true if checkjob reports the job in the Deferred estate.
  # Raises CPE17 when checkjob fails.
  def job_deferred?(job)
    output = execute_cmd("#{Settings.instance.get('CMD_CHECK_JOB')} #{job}")
    output.include?("EState: Deferred")
  rescue CPExecError => e
    raise CPE17.new(job).set_cause(e)
  end

  private

  # True when the node appears in the IGNORE_NODES setting, matched by
  # either its short name or its full name.
  def node_ignored?(node)
    ignored = Settings.instance.get('IGNORE_NODES').split(",").map { |name| name.strip }
    ignored.include?(short_node_name(node)) || ignored.include?(node)
  end

  # Runs cmd on the given host through ssh (quoting the remote command)
  # and returns its stdout.
  def execute_ssh_cmd(addr, cmd)
    execute_cmd("#{Settings.instance.get('CMD_SSH')} #{addr} \"#{cmd}\"")
  end

  # Runs "pbsnodes -x" and returns the XML output parsed into a Hash.
  # Raises CPE6 on command failure, CPE5 on malformed XML.
  def parse_pbsnodes
    xml = execute_cmd("#{Settings.instance.get('CMD_PBSNODES')} -x")
    XmlSimple.xml_in(xml)
  rescue CPExecError => e
    raise CPE6.new.set_cause(e)
  rescue REXML::ParseException => e
    raise CPE5.new.set_cause(e)
  end

  # Runs "qstat -fx <job_id>" and returns the XML output parsed into a
  # Hash. Raises CPE15 on command failure, CPE16 on malformed XML.
  def parse_qstat(job_id)
    xml = execute_cmd("#{Settings.instance.get('CMD_QSTAT')} -fx #{job_id}")
    XmlSimple.xml_in(xml)
  rescue CPExecError => e
    raise CPE15.new(job_id).set_cause(e)
  rescue REXML::ParseException => e
    raise CPE16.new(job_id).set_cause(e)
  end

  #
  # Calls the maui checkjob command in attribute mode ("-A"), whose
  # output is a single ";"-separated list of NAME=VALUE pairs.
  #
  # returns Hash[attribute, value]
  def parse_checkjob(job_id)
    raw = execute_cmd("#{Settings.instance.get('CMD_CHECK_JOB')} -A #{job_id}").strip
    raw.split(";").each_with_object({}) do |pair, attrs|
      key, value = pair.split("=")
      attrs[key] = value
    end
  rescue CPExecError => e
    raise CPE17.new(job_id).set_cause(e)
  end

  # Returns [[node name, state], ...] for every node reported by
  # pbsnodes, including ignored ones. Empty array when pbsnodes reports
  # nothing.
  def get_all_nodes_with_ignored
    data = parse_pbsnodes
    return [] if data.empty?
    data['Node'].map { |node| [node['name'][0], node['state'][0]] }
  end
  
  # Returns [[node name, state], ...] for every node that is not on the
  # ignore list.
  def get_all_nodes
    get_all_nodes_with_ignored.reject { |entry| node_ignored?(entry[0]) }
  end

end
